llama-model.cpp

  1. #include "llama-model.h"
  2. #include "llama-impl.h"
  3. #include "llama-mmap.h"
  4. #include "llama-batch.h"
  5. #include "llama-cparams.h"
  6. #include "llama-model-loader.h"
  7. #include "llama-kv-cache-unified.h"
  8. #include "llama-kv-cache-unified-iswa.h"
  9. #include "llama-kv-cache-recurrent.h"
  10. #include "ggml-cpp.h"
  11. #include <algorithm>
  12. #include <cassert>
  13. #include <cmath>
  14. #include <cfloat>
  15. #include <cstring>
  16. #include <cmath>
  17. #include <functional>
  18. #include <map>
  19. #include <regex>
  20. #include <sstream>
  21. #include <stdexcept>
  22. const char * llm_type_name(llm_type type) {
  23. switch (type) {
  24. case LLM_TYPE_14M: return "14M";
  25. case LLM_TYPE_17M: return "17M";
  26. case LLM_TYPE_22M: return "22M";
  27. case LLM_TYPE_33M: return "33M";
  28. case LLM_TYPE_60M: return "60M";
  29. case LLM_TYPE_70M: return "70M";
  30. case LLM_TYPE_80M: return "80M";
  31. case LLM_TYPE_109M: return "109M";
  32. case LLM_TYPE_137M: return "137M";
  33. case LLM_TYPE_160M: return "160M";
  34. case LLM_TYPE_190M: return "190M";
  35. case LLM_TYPE_220M: return "220M";
  36. case LLM_TYPE_250M: return "250M";
  37. case LLM_TYPE_270M: return "270M";
  38. case LLM_TYPE_335M: return "335M";
  39. case LLM_TYPE_410M: return "410M";
  40. case LLM_TYPE_450M: return "450M";
  41. case LLM_TYPE_475M: return "475M";
  42. case LLM_TYPE_770M: return "770M";
  43. case LLM_TYPE_780M: return "780M";
  44. case LLM_TYPE_0_5B: return "0.5B";
  45. case LLM_TYPE_0_6B: return "0.6B";
  46. case LLM_TYPE_1B: return "1B";
  47. case LLM_TYPE_1_3B: return "1.3B";
  48. case LLM_TYPE_1_4B: return "1.4B";
  49. case LLM_TYPE_1_5B: return "1.5B";
  50. case LLM_TYPE_1_6B: return "1.6B";
  51. case LLM_TYPE_1_7B: return "1.7B";
  52. case LLM_TYPE_1_8B: return "1.8B";
  53. case LLM_TYPE_2B: return "2B";
  54. case LLM_TYPE_2_8B: return "2.8B";
  55. case LLM_TYPE_2_9B: return "2.9B";
  56. case LLM_TYPE_3B: return "3B";
  57. case LLM_TYPE_4B: return "4B";
  58. case LLM_TYPE_6B: return "6B";
  59. case LLM_TYPE_6_9B: return "6.9B";
  60. case LLM_TYPE_7B: return "7B";
  61. case LLM_TYPE_8B: return "8B";
  62. case LLM_TYPE_9B: return "9B";
  63. case LLM_TYPE_11B: return "11B";
  64. case LLM_TYPE_12B: return "12B";
  65. case LLM_TYPE_13B: return "13B";
  66. case LLM_TYPE_14B: return "14B";
  67. case LLM_TYPE_15B: return "15B";
  68. case LLM_TYPE_16B: return "16B";
  69. case LLM_TYPE_20B: return "20B";
  70. case LLM_TYPE_27B: return "27B";
  71. case LLM_TYPE_30B: return "30B";
  72. case LLM_TYPE_32B: return "32B";
  73. case LLM_TYPE_34B: return "34B";
  74. case LLM_TYPE_35B: return "35B";
  75. case LLM_TYPE_40B: return "40B";
  76. case LLM_TYPE_65B: return "65B";
  77. case LLM_TYPE_70B: return "70B";
  78. case LLM_TYPE_142B: return "142B";
  79. case LLM_TYPE_236B: return "236B";
  80. case LLM_TYPE_290B: return "290B";
  81. case LLM_TYPE_314B: return "314B";
  82. case LLM_TYPE_405B: return "405B";
  83. case LLM_TYPE_671B: return "671B";
  84. case LLM_TYPE_SMALL: return "0.1B";
  85. case LLM_TYPE_MEDIUM: return "0.4B";
  86. case LLM_TYPE_LARGE: return "0.8B";
  87. case LLM_TYPE_XL: return "1.5B";
  88. case LLM_TYPE_A1_7B: return "A1.7B";
  89. case LLM_TYPE_A2_7B: return "A2.7B";
  90. case LLM_TYPE_8x7B: return "8x7B";
  91. case LLM_TYPE_8x22B: return "8x22B";
  92. case LLM_TYPE_16x12B: return "16x12B";
  93. case LLM_TYPE_16x3_8B: return "16x3.8B";
  94. case LLM_TYPE_10B_128x3_66B: return "10B+128x3.66B";
  95. case LLM_TYPE_57B_A14B: return "57B.A14B";
  96. case LLM_TYPE_17B_16E: return "17Bx16E (Scout)";
  97. case LLM_TYPE_17B_128E: return "17Bx128E (Maverick)";
  98. case LLM_TYPE_30B_A3B: return "30B.A3B";
  99. case LLM_TYPE_235B_A22B: return "235B.A22B";
  100. default: return "?B";
  101. }
  102. }
  103. static const char * llama_expert_gating_func_name(llama_expert_gating_func_type type) {
  104. switch (type) {
  105. case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: return "softmax";
  106. case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: return "sigmoid";
  107. default: return "unknown";
  108. }
  109. }
  110. static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
  111. { LLAMA_ROPE_SCALING_TYPE_NONE, "none" },
  112. { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" },
  113. { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" },
  114. { LLAMA_ROPE_SCALING_TYPE_LONGROPE, "longrope" },
  115. };
  116. std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type) {
  117. return LLAMA_ROPE_SCALING_TYPES.at(rope_scaling_type);
  118. }
  119. static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
  120. for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
  121. if (kv.second == name) {
  122. return (llama_rope_scaling_type) kv.first;
  123. }
  124. }
  125. return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
  126. }
  127. // checks if the weight tensor can be used with the specified buffer type and device
  128. static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) {
  129. GGML_ASSERT(w != nullptr);
  130. if (op == GGML_OP_NONE) {
  131. return true;
  132. }
  133. ggml_init_params params = {
  134. /*.mem_size =*/ ggml_tensor_overhead()*8,
  135. /*.mem_buffer =*/ NULL,
  136. /*.no_alloc =*/ true,
  137. };
  138. ggml_context_ptr ctx_ptr { ggml_init(params) };
  139. if (!ctx_ptr) {
  140. throw std::runtime_error(format("failed to create ggml context"));
  141. }
  142. ggml_context * ctx = ctx_ptr.get();
  143. ggml_tensor * op_tensor = nullptr;
  144. switch (op) {
  145. case GGML_OP_GET_ROWS:
  146. {
  147. ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
  148. op_tensor = ggml_get_rows(ctx, w, b);
  149. } break;
  150. case GGML_OP_MUL_MAT:
  151. {
  152. ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], 512, w->ne[2], w->ne[3]);
  153. op_tensor = ggml_mul_mat(ctx, w, b);
  154. } break;
  155. case GGML_OP_MUL_MAT_ID:
  156. {
  157. int n_expert_used = hparams.n_expert_used;
  158. ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512);
  159. ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512);
  160. op_tensor = ggml_mul_mat_id(ctx, w, b, ids);
  161. } break;
  162. case GGML_OP_ADD:
  163. {
  164. ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
  165. op_tensor = ggml_add(ctx, a, w);
  166. } break;
  167. case GGML_OP_MUL:
  168. {
  169. ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
  170. op_tensor = ggml_mul(ctx, a, w);
  171. } break;
  172. case GGML_OP_DIV:
  173. {
  174. ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, w->ne[0]);
  175. op_tensor = ggml_div(ctx, a, w);
  176. } break;
  177. case GGML_OP_ROPE:
  178. {
  179. int n_embd_head = hparams.n_embd_head_v;
  180. int n_head = hparams.n_head();
  181. ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd_head, n_head, 512);
  182. ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
  183. op_tensor = ggml_rope_ext(
  184. ctx, a, b, w,
  185. 0, 0, 0, 0, 0,
  186. 0, 0, 0, 0
  187. );
  188. } break;
  189. case GGML_OP_SSM_CONV:
  190. {
  191. // FIXME
  192. ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 12345, w->ne[1], 6789);
  193. op_tensor = ggml_ssm_conv(ctx, conv_x, w);
  194. } break;
  195. case GGML_OP_SSM_SCAN:
  196. {
  197. // FIXME
  198. const int64_t d_state = w->ne[0];
  199. const int64_t d_inner = w->ne[1];
  200. const int64_t n_seq_tokens = 512;
  201. const int64_t n_seqs = 1;
  202. ggml_tensor * s = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, d_inner, n_seqs);
  203. ggml_tensor * x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs);
  204. ggml_tensor * dt = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs);
  205. ggml_tensor * B = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs);
  206. ggml_tensor * C = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs);
  207. op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C);
  208. } break;
  209. case GGML_OP_RWKV_WKV6:
  210. {
  211. // FIXME
  212. const int64_t S = 123;
  213. const int64_t H = 123;
  214. const int64_t n_tokens = 123;
  215. const int64_t n_seqs = 123;
  216. ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
  217. ggml_tensor * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
  218. ggml_tensor * r = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
  219. ggml_tensor * tf = w;
  220. ggml_tensor * td = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
  221. ggml_tensor * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H);
  222. op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state);
  223. } break;
  224. case GGML_OP_IM2COL:
  225. {
  226. const int n_embd = hparams.n_embd;
  227. ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd, w->ne[1], 1, 1);
  228. op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16);
  229. } break;
  230. default:
  231. GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name);
  232. }
  233. // create a temporary dummy buffer for the weight so that supports_op can check the buffer type
  234. GGML_ASSERT(w->buffer == nullptr);
  235. w->buffer = ggml_backend_buft_alloc_buffer(buft, 0);
  236. bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
  237. ggml_backend_buffer_free(w->buffer);
  238. w->buffer = nullptr;
  239. return op_supported;
  240. }
  241. // lists of buffer types used for each layer
  242. using buft_list_t = std::vector<std::pair<ggml_backend_dev_t, ggml_backend_buffer_type_t>>;
  243. // find the first buffer type in the list that can use the tensor
  244. static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hparams, ggml_tensor * tensor, ggml_op op, const buft_list_t & buft_list) {
  245. GGML_ASSERT(!buft_list.empty());
  246. for (const auto & cur : buft_list) {
  247. ggml_backend_dev_t cur_dev = cur.first;
  248. ggml_backend_buffer_type_t cur_buft = cur.second;
  249. if (weight_buft_supported(hparams, tensor, op, cur_buft, cur_dev)) {
  250. return cur_buft;
  251. }
  252. }
  253. return nullptr;
  254. }
  255. // CPU: ACCEL -> GPU host -> CPU extra -> CPU
  256. static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices) {
  257. buft_list_t buft_list;
  258. // add ACCEL buffer types
  259. for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
  260. ggml_backend_dev_t dev = ggml_backend_dev_get(i);
  261. if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
  262. auto * buft = ggml_backend_dev_buffer_type(dev);
  263. // skip
  264. if (buft != ggml_backend_cpu_buffer_type()) {
  265. buft_list.emplace_back(dev, buft);
  266. }
  267. }
  268. }
  269. // add a host buffer type
  270. // storing the tensors in a host buffer is useful when the processing of large batches
  271. // is offloaded to a GPU device, since it reduces the time spent on data transfers
  272. // generally, this will be done using the first device in the list
  273. // a better approach would be to handle this on a weight-by-weight basis using the offload_op
  274. // function of the device to determine if it would benefit from being stored in a host buffer
  275. for (auto * dev : devices) {
  276. ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
  277. if (buft) {
  278. buft_list.emplace_back(dev, buft);
  279. break;
  280. }
  281. }
  282. // add extra buffer types, only if no GPU device is present
  283. // ref: https://github.com/ggml-org/llama.cpp/issues/12481#issuecomment-2743136094
  284. auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
  285. if (cpu_dev == nullptr) {
  286. throw std::runtime_error(format("%s: no CPU backend found", __func__));
  287. }
  288. auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
  289. auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
  290. ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
  291. if (ggml_backend_dev_get_extra_bufts_fn) {
  292. ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
  293. while (extra_bufts && *extra_bufts) {
  294. buft_list.emplace_back(cpu_dev, *extra_bufts);
  295. ++extra_bufts;
  296. }
  297. }
  298. // add the CPU buffer type
  299. for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
  300. ggml_backend_dev_t dev = ggml_backend_dev_get(i);
  301. if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) {
  302. buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
  303. }
  304. }
  305. return buft_list;
  306. }
  307. // GPU: split if LLAMA_SPLIT_MODE_ROW -> GPU
  308. static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode split_mode, const float * tensor_split) {
  309. buft_list_t buft_list;
  310. // add the device split buffer type if requested and available
  311. if (split_mode == LLAMA_SPLIT_MODE_ROW) {
  312. ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
  313. auto ggml_backend_split_buffer_type_fn = (ggml_backend_split_buffer_type_t)
  314. ggml_backend_reg_get_proc_address(reg, "ggml_backend_split_buffer_type");
  315. if (ggml_backend_split_buffer_type_fn) {
  316. size_t dev_index = [&]() {
  317. auto * reg = ggml_backend_dev_backend_reg(dev);
  318. for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); ++i) {
  319. if (ggml_backend_reg_dev_get(reg, i) == dev) {
  320. return i;
  321. }
  322. }
  323. throw std::runtime_error(format("device %s not found in its backend reg", ggml_backend_dev_name(dev)));
  324. }();
  325. auto * buft = ggml_backend_split_buffer_type_fn(dev_index, tensor_split);
  326. if (buft != nullptr) {
  327. buft_list.emplace_back(dev, buft);
  328. }
  329. }
  330. }
  331. // add the device default buffer type
  332. buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
  333. return buft_list;
  334. }
  335. struct llama_model::impl {
  336. impl() {}
  337. ~impl() {}
  338. uint64_t n_elements = 0;
  339. size_t n_bytes = 0;
  340. std::string desc_str;
  341. // model memory mapped files
  342. llama_mmaps mappings;
  343. // objects representing data potentially being locked in memory
  344. llama_mlocks mlock_bufs;
  345. llama_mlocks mlock_mmaps;
  346. // contexts where the model tensors metadata is stored
  347. std::vector<ggml_context_ptr> ctxs;
  348. // the model memory buffers for the tensor data
  349. std::vector<ggml_backend_buffer_ptr> bufs;
  350. buft_list_t cpu_buft_list;
  351. std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
  352. struct layer_dev {
  353. ggml_backend_dev_t dev;
  354. buft_list_t * buft_list;
  355. };
  356. layer_dev dev_input = {};
  357. layer_dev dev_output = {};
  358. std::vector<layer_dev> dev_layer;
  359. bool has_tensor_overrides;
  360. };
  361. llama_model::llama_model(const llama_model_params & params) : params(params), pimpl(std::make_unique<impl>()) {
  362. pimpl->has_tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
  363. }
  364. llama_model::~llama_model() {}
  365. void llama_model::load_stats(llama_model_loader & ml) {
  366. pimpl->n_elements = ml.n_elements;
  367. pimpl->n_bytes = ml.n_bytes;
  368. }
  369. void llama_model::load_arch(llama_model_loader & ml) {
  370. arch = ml.get_arch();
  371. if (arch == LLM_ARCH_UNKNOWN) {
  372. throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
  373. }
  374. }
  375. void llama_model::load_hparams(llama_model_loader & ml) {
  376. const gguf_context * ctx = ml.meta.get();
  377. // get metadata as string
  378. for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
  379. gguf_type type = gguf_get_kv_type(ctx, i);
  380. if (type == GGUF_TYPE_ARRAY) {
  381. continue;
  382. }
  383. const char * name = gguf_get_key(ctx, i);
  384. const std::string value = gguf_kv_to_str(ctx, i);
  385. gguf_kv.emplace(name, value);
  386. }
  387. // get general kv
  388. ml.get_key(LLM_KV_GENERAL_NAME, name, false);
  389. // everything past this point is not vocab-related
  390. if (hparams.vocab_only) {
  391. return;
  392. }
  393. ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
  394. ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
  395. ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer);
  396. ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
  397. ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
  398. if (arch == LLM_ARCH_WAVTOKENIZER_DEC) {
  399. ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features);
  400. ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd);
  401. ml.get_key(LLM_KV_POSNET_BLOCK_COUNT, hparams.posnet.n_layer);
  402. ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd);
  403. ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT, hparams.convnext.n_layer);
  404. }
  405. GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
  406. GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
  407. if (hparams.n_expert > 0) {
  408. GGML_ASSERT(hparams.n_expert_used > 0);
  409. } else {
  410. GGML_ASSERT(hparams.n_expert_used == 0);
  411. }
  412. std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
  413. std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
  414. std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
  415. std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
  416. std::fill(hparams.swa_layers.begin(), hparams.swa_layers.end(), 0);
  417. ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer, false);
  418. ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
  419. // n_head_kv is optional, default to n_head
  420. hparams.n_head_kv_arr = hparams.n_head_arr;
  421. ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false);
  422. bool rope_finetuned = false;
  423. ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
  424. hparams.rope_finetuned = rope_finetuned;
  425. hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
  426. ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
  427. // rope_freq_base (optional)
  428. hparams.rope_freq_base_train = 10000.0f;
  429. ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
  430. std::string rope_scaling("linear");
  431. ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
  432. hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
  433. GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
  434. // rope_freq_scale (inverse of the kv) is optional
  435. float ropescale = 0.0f;
  436. if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
  437. // try the old key name
  438. ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
  439. }
  440. hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
  441. // by default assume that the sliding-window layers use the same scaling type as the non-sliding-window layers
  442. hparams.rope_freq_base_train_swa = hparams.rope_freq_base_train;
  443. hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
  444. ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
  445. // non-transformer models do not have attention heads
  446. if (hparams.n_head() > 0) {
  447. // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
  448. // gpt-j n_rot = rotary_dim
  449. hparams.n_embd_head_k = hparams.n_embd / hparams.n_head();
  450. ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
  451. hparams.n_embd_head_v = hparams.n_embd / hparams.n_head();
  452. ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
  453. // sanity check for n_rot (optional)
  454. hparams.n_rot = hparams.n_embd_head_k;
  455. ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
  456. if (arch == LLM_ARCH_LLAMA || arch == LLM_ARCH_DECI || arch == LLM_ARCH_FALCON) {
  457. if (hparams.n_rot != hparams.n_embd_head_k) {
  458. throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
  459. }
  460. }
  461. } else {
  462. hparams.n_rot = 0;
  463. hparams.n_embd_head_k = 0;
  464. hparams.n_embd_head_v = 0;
  465. }
  466. // for differentiating model types
  467. uint32_t n_vocab = 0;
  468. ml.get_key(LLM_KV_VOCAB_SIZE, n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, n_vocab, false);
  469. // for classifier models
  470. ml.get_arr(LLM_KV_CLASSIFIER_OUTPUT_LABELS, classifier_labels, false);
  471. if (!classifier_labels.empty()) {
  472. hparams.n_cls_out = classifier_labels.size();
  473. }
  474. // arch-specific KVs
  475. switch (arch) {
  476. case LLM_ARCH_LLAMA:
  477. {
  478. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  479. if (hparams.n_expert == 8) {
  480. switch (hparams.n_layer) {
  481. case 32: type = LLM_TYPE_8x7B; break;
  482. case 56: type = LLM_TYPE_8x22B; break;
  483. default: type = LLM_TYPE_UNKNOWN;
  484. }
  485. } else {
  486. switch (hparams.n_layer) {
  487. case 16: type = LLM_TYPE_1B; break; // Llama 3.2 1B
  488. case 22: type = LLM_TYPE_1B; break;
  489. case 26: type = LLM_TYPE_3B; break;
  490. case 28: type = LLM_TYPE_3B; break; // Llama 3.2 3B
  491. // granite uses a vocab with len 49152
  492. case 32: type = n_vocab == 49152 ? LLM_TYPE_3B : (n_vocab < 40000 ? LLM_TYPE_7B : LLM_TYPE_8B); break;
  493. case 36: type = LLM_TYPE_8B; break; // granite
  494. case 40: type = LLM_TYPE_13B; break;
  495. case 48: type = LLM_TYPE_34B; break;
  496. case 60: type = LLM_TYPE_30B; break;
  497. case 80: type = hparams.n_head() == hparams.n_head_kv() ? LLM_TYPE_65B : LLM_TYPE_70B; break;
  498. default: type = LLM_TYPE_UNKNOWN;
  499. }
  500. }
  501. } break;
  502. case LLM_ARCH_LLAMA4:
  503. {
  504. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  505. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  506. ml.get_key(LLM_KV_INTERLEAVE_MOE_LAYER_STEP, hparams.n_moe_layer_step);
  507. hparams.swa_type = LLAMA_SWA_TYPE_CHUNKED;
  508. hparams.n_swa = 8192; // should this be a gguf kv? currently it's the same for Scout and Maverick
  509. hparams.set_swa_pattern(4); // pattern: 3 chunked - 1 full
  510. switch (hparams.n_expert) {
  511. case 16: type = LLM_TYPE_17B_16E; break;
  512. case 128: type = LLM_TYPE_17B_128E; break;
  513. default: type = LLM_TYPE_UNKNOWN;
  514. }
  515. if (type == LLM_TYPE_17B_128E) {
  516. hparams.use_kq_norm = false;
  517. }
  518. } break;
  519. case LLM_ARCH_DECI:
  520. {
  521. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  522. switch (hparams.n_layer) {
  523. case 32: type = LLM_TYPE_7B; break;
  524. case 80: type = LLM_TYPE_70B; break;
  525. case 162: type = LLM_TYPE_405B; break;
  526. default: type = LLM_TYPE_UNKNOWN;
  527. }
  528. } break;
  529. case LLM_ARCH_MINICPM:
  530. {
  531. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  532. ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
  533. ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
  534. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  535. switch (hparams.n_layer) {
  536. case 52: type = LLM_TYPE_1B; break;
  537. case 40: type = LLM_TYPE_2B; break;
  538. default: type = LLM_TYPE_UNKNOWN;
  539. }
  540. } break;
  541. case LLM_ARCH_MINICPM3:
  542. {
  543. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  544. ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
  545. ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
  546. switch (hparams.n_layer) {
  547. case 62: type = LLM_TYPE_4B; break;
  548. default: type = LLM_TYPE_UNKNOWN;
  549. }
  550. } break;
  551. case LLM_ARCH_GROK:
  552. {
  553. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  554. switch (hparams.n_layer) {
  555. case 64: type = LLM_TYPE_314B; break;
  556. default: type = LLM_TYPE_UNKNOWN;
  557. }
  558. } break;
  559. case LLM_ARCH_FALCON:
  560. {
  561. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  562. switch (hparams.n_layer) {
  563. case 32: type = LLM_TYPE_7B; break;
  564. case 60: type = LLM_TYPE_40B; break;
  565. default: type = LLM_TYPE_UNKNOWN;
  566. }
  567. } break;
  568. case LLM_ARCH_BAICHUAN:
  569. {
  570. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  571. switch (hparams.n_layer) {
  572. case 32: type = LLM_TYPE_7B; break;
  573. case 40: type = LLM_TYPE_13B; break;
  574. default: type = LLM_TYPE_UNKNOWN;
  575. }
  576. if (type == LLM_TYPE_13B) {
  577. // TODO: become GGUF KV parameter
  578. hparams.f_max_alibi_bias = 8.0f;
  579. }
  580. } break;
  581. case LLM_ARCH_STARCODER:
  582. {
  583. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  584. switch (hparams.n_layer) {
  585. case 24: type = LLM_TYPE_1B; break;
  586. case 36: type = LLM_TYPE_3B; break;
  587. case 42: type = LLM_TYPE_7B; break;
  588. case 40: type = LLM_TYPE_15B; break;
  589. default: type = LLM_TYPE_UNKNOWN;
  590. }
  591. } break;
  592. case LLM_ARCH_REFACT:
  593. {
  594. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  595. switch (hparams.n_layer) {
  596. case 32: type = LLM_TYPE_1B; break;
  597. default: type = LLM_TYPE_UNKNOWN;
  598. }
  599. // TODO: become GGUF KV parameter
  600. hparams.f_max_alibi_bias = 8.0f;
  601. } break;
  602. case LLM_ARCH_BERT:
  603. {
  604. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  605. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  606. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
  607. switch (hparams.n_layer) {
  608. case 3:
  609. type = LLM_TYPE_17M; break; // bge-micro
  610. case 6:
  611. type = LLM_TYPE_22M; break; // MiniLM-L6
  612. case 12:
  613. switch (hparams.n_embd) {
  614. case 384: type = LLM_TYPE_33M; break; // MiniLM-L12, bge-small
  615. case 768: type = LLM_TYPE_109M; break; // bge-base
  616. default: type = LLM_TYPE_UNKNOWN;
  617. } break;
  618. case 24:
  619. type = LLM_TYPE_335M; break; // bge-large
  620. default: type = LLM_TYPE_UNKNOWN;
  621. }
  622. } break;
  623. case LLM_ARCH_JINA_BERT_V2:
  624. {
  625. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  626. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  627. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
  628. hparams.f_max_alibi_bias = 8.0f;
  629. switch (hparams.n_layer) {
  630. case 4: type = LLM_TYPE_33M; break; // jina-embeddings-small
  631. case 12: type = LLM_TYPE_137M; break; // jina-embeddings-base
  632. default: type = LLM_TYPE_UNKNOWN;
  633. }
  634. } break;
  635. case LLM_ARCH_NOMIC_BERT:
  636. case LLM_ARCH_NOMIC_BERT_MOE:
  637. {
  638. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  639. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  640. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
  641. ml.get_key(LLM_KV_MOE_EVERY_N_LAYERS, hparams.moe_every_n_layers, 0);
  642. if (hparams.n_layer == 12 && hparams.n_embd == 768) {
  643. if (arch == LLM_ARCH_NOMIC_BERT) {
  644. type = LLM_TYPE_137M;
  645. } else if (arch == LLM_ARCH_NOMIC_BERT_MOE && hparams.moe_every_n_layers == 2) {
  646. type = LLM_TYPE_475M;
  647. }
  648. }
  649. } break;
  650. case LLM_ARCH_BLOOM:
  651. {
  652. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  653. switch (hparams.n_layer) {
  654. case 24: type = LLM_TYPE_1B; break;
  655. case 30:
  656. switch (hparams.n_embd) {
  657. case 2560: type = LLM_TYPE_3B; break;
  658. case 4096: type = LLM_TYPE_7B; break;
  659. default: type = LLM_TYPE_UNKNOWN;
  660. } break;
  661. default: type = LLM_TYPE_UNKNOWN;
  662. }
  663. // TODO: become GGUF KV parameter
  664. hparams.f_max_alibi_bias = 8.0f;
  665. } break;
  666. case LLM_ARCH_MPT:
  667. {
  668. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  669. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  670. ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
  671. switch (hparams.n_layer) {
  672. case 32: type = LLM_TYPE_7B; break;
  673. case 48: type = LLM_TYPE_30B; break;
  674. default: type = LLM_TYPE_UNKNOWN;
  675. }
  676. } break;
  677. case LLM_ARCH_STABLELM:
  678. {
  679. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  680. switch (hparams.n_layer) {
  681. case 24: type = LLM_TYPE_1B; break;
  682. case 32: type = LLM_TYPE_3B; break;
  683. case 40: type = LLM_TYPE_12B; break;
  684. default: type = LLM_TYPE_UNKNOWN;
  685. }
  686. } break;
  687. case LLM_ARCH_QWEN:
  688. {
  689. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  690. switch (hparams.n_layer) {
  691. case 32: type = LLM_TYPE_7B; break;
  692. case 40: type = LLM_TYPE_13B; break;
  693. default: type = LLM_TYPE_UNKNOWN;
  694. }
  695. } break;
  696. case LLM_ARCH_QWEN2VL:
  697. {
  698. ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
  699. }
  700. // fall through
  701. case LLM_ARCH_QWEN2:
  702. {
  703. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
  704. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  705. switch (hparams.n_layer) {
  706. case 24: type = hparams.n_embd == 1024 ? LLM_TYPE_0_5B : LLM_TYPE_1B; break;
  707. case 28: type = hparams.n_embd == 1536 ? LLM_TYPE_1_5B : LLM_TYPE_7B; break;
  708. case 32: type = LLM_TYPE_7B; break;
  709. case 36: type = LLM_TYPE_3B; break;
  710. case 40: type = hparams.n_head() == 20 ? LLM_TYPE_4B : LLM_TYPE_13B; break;
  711. case 48: type = LLM_TYPE_14B; break;
  712. case 64: type = LLM_TYPE_32B; break;
  713. case 80: type = LLM_TYPE_70B; break;
  714. default: type = LLM_TYPE_UNKNOWN;
  715. }
  716. } break;
  717. case LLM_ARCH_QWEN2MOE:
  718. {
  719. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
  720. ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
  721. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  722. switch (hparams.n_layer) {
  723. case 24: type = LLM_TYPE_A2_7B; break;
  724. case 28: type = LLM_TYPE_57B_A14B; break;
  725. default: type = LLM_TYPE_UNKNOWN;
  726. }
  727. } break;
  728. case LLM_ARCH_QWEN3:
  729. {
  730. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  731. switch (hparams.n_layer) {
  732. case 28: type = hparams.n_embd == 1024 ? LLM_TYPE_0_6B : LLM_TYPE_1_7B; break;
  733. case 36: type = hparams.n_embd == 2560 ? LLM_TYPE_4B : LLM_TYPE_8B; break;
  734. case 40: type = LLM_TYPE_14B; break;
  735. case 64: type = LLM_TYPE_32B; break;
  736. default: type = LLM_TYPE_UNKNOWN;
  737. }
  738. } break;
  739. case LLM_ARCH_QWEN3MOE:
  740. {
  741. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
  742. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  743. switch (hparams.n_layer) {
  744. case 48: type = LLM_TYPE_30B_A3B; break;
  745. case 94: type = LLM_TYPE_235B_A22B; break;
  746. default: type = LLM_TYPE_UNKNOWN;
  747. }
  748. } break;
  749. case LLM_ARCH_PHI2:
  750. {
  751. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  752. switch (hparams.n_layer) {
  753. case 24: type = LLM_TYPE_1B; break;
  754. case 32: type = LLM_TYPE_3B; break;
  755. default: type = LLM_TYPE_UNKNOWN;
  756. }
  757. } break;
  758. case LLM_ARCH_PHI3:
  759. {
  760. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  761. switch (hparams.n_layer) {
  762. case 24: type = LLM_TYPE_1B; break;
  763. case 32: type = LLM_TYPE_3B; break;
  764. case 40: type = LLM_TYPE_14B; break;
  765. default: type = LLM_TYPE_UNKNOWN;
  766. }
  767. const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
  768. if (found_swa && hparams.n_swa > 0) {
  769. LLAMA_LOG_WARN("%s: Phi SWA is currently disabled - results might be suboptimal for some models (see %s)\n",
  770. __func__, "https://github.com/ggml-org/llama.cpp/pull/13676");
  771. // TODO: fix conversion scripts to correctly populate `n_swa` and `n_swa_pattern`
  772. hparams.swa_type = LLAMA_SWA_TYPE_NONE;
  773. hparams.n_swa = 0;
  774. hparams.set_swa_pattern(1);
  775. }
  776. } break;
  777. case LLM_ARCH_PHIMOE:
  778. {
  779. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  780. switch (hparams.n_layer) {
  781. case 32: type = LLM_TYPE_16x3_8B; break;
  782. default: type = LLM_TYPE_UNKNOWN;
  783. }
  784. } break;
  785. case LLM_ARCH_PLAMO:
  786. {
  787. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  788. switch (hparams.n_layer) {
  789. case 40: type = LLM_TYPE_13B; break;
  790. default: type = LLM_TYPE_UNKNOWN;
  791. }
  792. } break;
  793. case LLM_ARCH_GPT2:
  794. {
  795. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  796. switch (hparams.n_layer) {
  797. case 12: type = LLM_TYPE_SMALL; break;
  798. case 24: type = LLM_TYPE_MEDIUM; break;
  799. case 36: type = LLM_TYPE_LARGE; break;
  800. case 48: type = LLM_TYPE_XL; break;
  801. default: type = LLM_TYPE_UNKNOWN;
  802. }
  803. } break;
  804. case LLM_ARCH_CODESHELL:
  805. {
  806. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  807. switch (hparams.n_layer) {
  808. case 42: type = LLM_TYPE_7B; break;
  809. default: type = LLM_TYPE_UNKNOWN;
  810. }
  811. } break;
  812. case LLM_ARCH_ORION:
  813. {
  814. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  815. switch (hparams.n_layer) {
  816. case 40: type = LLM_TYPE_14B; break;
  817. default: type = LLM_TYPE_UNKNOWN;
  818. }
  819. } break;
  820. case LLM_ARCH_INTERNLM2:
  821. {
  822. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  823. switch (hparams.n_layer) {
  824. case 32: type = LLM_TYPE_7B; break;
  825. case 48: type = LLM_TYPE_20B; break;
  826. default: type = LLM_TYPE_UNKNOWN;
  827. }
  828. } break;
  829. case LLM_ARCH_GEMMA:
  830. {
  831. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  832. switch (hparams.n_layer) {
  833. case 18: type = LLM_TYPE_2B; break;
  834. case 28: type = LLM_TYPE_7B; break;
  835. default: type = LLM_TYPE_UNKNOWN;
  836. }
  837. } break;
  838. case LLM_ARCH_GEMMA2:
  839. {
  840. hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
  841. hparams.n_swa = 4096; // default value of gemma 2
  842. hparams.set_swa_pattern(2);
  843. hparams.attn_soft_cap = true;
  844. ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
  845. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  846. ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false);
  847. ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
  848. switch (hparams.n_layer) {
  849. case 26: type = LLM_TYPE_2B; break;
  850. case 42: type = LLM_TYPE_9B; break;
  851. case 46: type = LLM_TYPE_27B; break;
  852. default: type = LLM_TYPE_UNKNOWN;
  853. }
  854. // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/config.py#L173
  855. hparams.f_attention_scale = type == LLM_TYPE_27B
  856. ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
  857. : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
  858. } break;
  859. case LLM_ARCH_GEMMA3:
  860. {
  861. hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
  862. hparams.set_swa_pattern(6);
  863. hparams.rope_freq_base_train_swa = 10000.0f;
  864. hparams.rope_freq_scale_train_swa = 1.0f;
  865. ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
  866. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  867. switch (hparams.n_layer) {
  868. case 26: type = LLM_TYPE_1B; break;
  869. case 34: type = LLM_TYPE_4B; break;
  870. case 48: type = LLM_TYPE_12B; break;
  871. case 62: type = LLM_TYPE_27B; break;
  872. default: type = LLM_TYPE_UNKNOWN;
  873. }
  874. // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/config.py#L289
  875. hparams.f_attention_scale = type == LLM_TYPE_27B
  876. ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
  877. : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
  878. } break;
  879. case LLM_ARCH_STARCODER2:
  880. {
  881. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  882. switch (hparams.n_layer) {
  883. case 30: type = LLM_TYPE_3B; break;
  884. case 32: type = LLM_TYPE_7B; break;
  885. case 40: type = LLM_TYPE_15B; break;
  886. case 52: type = LLM_TYPE_20B; break; // granite
  887. case 88: type = LLM_TYPE_34B; break; // granite
  888. default: type = LLM_TYPE_UNKNOWN;
  889. }
  890. } break;
  891. case LLM_ARCH_MAMBA:
  892. {
  893. ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
  894. ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
  895. ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
  896. ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
  897. ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false);
  898. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  899. switch (hparams.n_layer) {
  900. case 24:
  901. switch (hparams.n_embd) {
  902. case 768: type = LLM_TYPE_SMALL; break;
  903. default: type = LLM_TYPE_UNKNOWN;
  904. } break;
  905. case 48:
  906. switch (hparams.n_embd) {
  907. case 1024: type = LLM_TYPE_MEDIUM; break;
  908. case 1536: type = LLM_TYPE_LARGE; break;
  909. case 2048: type = LLM_TYPE_XL; break;
  910. default: type = LLM_TYPE_UNKNOWN;
  911. } break;
  912. case 64:
  913. switch (hparams.n_embd) {
  914. case 2560: type = LLM_TYPE_3B; break;
  915. default: type = LLM_TYPE_UNKNOWN;
  916. } break;
  917. default: type = LLM_TYPE_UNKNOWN;
  918. }
  919. } break;
  920. case LLM_ARCH_XVERSE:
  921. {
  922. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  923. switch (hparams.n_layer) {
  924. case 32: type = LLM_TYPE_7B; break;
  925. case 40: type = LLM_TYPE_13B; break;
  926. case 80: type = LLM_TYPE_65B; break;
  927. default: type = LLM_TYPE_UNKNOWN;
  928. }
  929. } break;
  930. case LLM_ARCH_COMMAND_R:
  931. {
  932. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  933. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  934. switch (hparams.n_layer) {
  935. case 40: type = LLM_TYPE_35B; break;
  936. default: type = LLM_TYPE_UNKNOWN;
  937. }
  938. } break;
  939. case LLM_ARCH_COHERE2:
  940. {
  941. hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
  942. hparams.set_swa_pattern(4);
  943. ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
  944. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  945. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  946. switch (hparams.n_layer) {
  947. case 32: type = LLM_TYPE_8B; break;
  948. default: type = LLM_TYPE_UNKNOWN;
  949. }
  950. } break;
  951. case LLM_ARCH_DBRX:
  952. {
  953. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  954. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
  955. switch (hparams.n_layer) {
  956. case 40: type = LLM_TYPE_16x12B; break;
  957. default: type = LLM_TYPE_UNKNOWN;
  958. }
  959. } break;
  960. case LLM_ARCH_OLMO:
  961. {
  962. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  963. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  964. switch (hparams.n_layer) {
  965. case 22: type = LLM_TYPE_1B; break;
  966. case 32: type = LLM_TYPE_7B; break;
  967. case 80: type = LLM_TYPE_70B; break;
  968. default: type = LLM_TYPE_UNKNOWN;
  969. }
  970. } break;
  971. case LLM_ARCH_OLMO2:
  972. {
  973. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  974. switch (hparams.n_layer) {
  975. case 16: type = LLM_TYPE_1B; break;
  976. case 32: type = LLM_TYPE_7B; break;
  977. case 40: type = LLM_TYPE_13B; break;
  978. case 64: type = LLM_TYPE_32B; break;
  979. default: type = LLM_TYPE_UNKNOWN;
  980. }
  981. } break;
  982. case LLM_ARCH_OLMOE:
  983. {
  984. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  985. switch (hparams.n_layer) {
  986. case 16: type = LLM_TYPE_A1_7B; break;
  987. default: type = LLM_TYPE_UNKNOWN;
  988. }
  989. } break;
  990. case LLM_ARCH_OPENELM:
  991. {
  992. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  993. switch (hparams.n_layer) {
  994. case 16: type = LLM_TYPE_270M; break;
  995. case 20: type = LLM_TYPE_450M; break;
  996. case 28: type = LLM_TYPE_1B; break;
  997. case 36: type = LLM_TYPE_3B; break;
  998. default: type = LLM_TYPE_UNKNOWN;
  999. }
  1000. } break;
  1001. case LLM_ARCH_GPTNEOX:
  1002. {
  1003. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  1004. ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
  1005. switch (hparams.n_layer) {
  1006. case 6:
  1007. switch (hparams.n_ff()) {
  1008. case 512: type = LLM_TYPE_14M; break;
  1009. case 2048: type = LLM_TYPE_70M; break;
  1010. default: type = LLM_TYPE_UNKNOWN;
  1011. } break;
  1012. case 12:
  1013. switch (hparams.n_ff()) {
  1014. case 3072: type = LLM_TYPE_160M; break;
  1015. default: type = LLM_TYPE_UNKNOWN;
  1016. } break;
  1017. case 16:
  1018. switch (hparams.n_ff()) {
  1019. case 8192: type = LLM_TYPE_1B; break;
  1020. default: type = LLM_TYPE_UNKNOWN;
  1021. } break;
  1022. case 24:
  1023. switch (hparams.n_ff()) {
  1024. case 4096: type = LLM_TYPE_410M; break;
  1025. case 8192: type = LLM_TYPE_1_4B; break;
  1026. default: type = LLM_TYPE_UNKNOWN;
  1027. } break;
  1028. case 32:
  1029. switch (hparams.n_ff()) {
  1030. case 10240: type = LLM_TYPE_2_8B; break;
  1031. case 16384: type = LLM_TYPE_6_9B; break;
  1032. default: type = LLM_TYPE_UNKNOWN;
  1033. } break;
  1034. case 36:
  1035. switch (hparams.n_ff()) {
  1036. case 20480: type = LLM_TYPE_12B; break;
  1037. default: type = LLM_TYPE_UNKNOWN;
  1038. } break;
  1039. case 44:
  1040. switch (hparams.n_ff()) {
  1041. case 24576: type = LLM_TYPE_20B; break;
  1042. default: type = LLM_TYPE_UNKNOWN;
  1043. } break;
  1044. default: type = LLM_TYPE_UNKNOWN;
  1045. }
  1046. } break;
  1047. case LLM_ARCH_ARCTIC:
  1048. {
  1049. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1050. if (hparams.n_expert == 128) {
  1051. switch (hparams.n_layer) {
  1052. case 35: type = LLM_TYPE_10B_128x3_66B; break;
  1053. default: type = LLM_TYPE_UNKNOWN;
  1054. }
  1055. } else {
  1056. type = LLM_TYPE_UNKNOWN;
  1057. }
  1058. } break;
  1059. case LLM_ARCH_DEEPSEEK:
  1060. {
  1061. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1062. ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
  1063. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  1064. ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
  1065. ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
  1066. switch (hparams.n_layer) {
  1067. case 28: type = LLM_TYPE_20B; break;
  1068. default: type = LLM_TYPE_UNKNOWN;
  1069. }
  1070. } break;
  1071. case LLM_ARCH_DEEPSEEK2:
  1072. {
  1073. bool is_lite = (hparams.n_layer == 27);
  1074. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1075. ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
  1076. if (!is_lite) {
  1077. ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
  1078. }
  1079. ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
  1080. ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH_MLA, hparams.n_embd_head_k_mla, false);
  1081. ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH_MLA, hparams.n_embd_head_v_mla, false);
  1082. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  1083. ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
  1084. ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
  1085. ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
  1086. ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false);
  1087. if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
  1088. // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
  1089. // that have no expert_gating_func model parameter set
  1090. hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
  1091. }
  1092. ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
  1093. switch (hparams.n_layer) {
  1094. case 27: type = LLM_TYPE_16B; break;
  1095. case 60: type = LLM_TYPE_236B; break;
  1096. case 61: type = LLM_TYPE_671B; break;
  1097. default: type = LLM_TYPE_UNKNOWN;
  1098. }
  1099. } break;
  1100. case LLM_ARCH_PLM:
  1101. {
  1102. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1103. ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
  1104. switch (hparams.n_layer) {
  1105. case 32: type = LLM_TYPE_1_8B; break;
  1106. default: type = LLM_TYPE_UNKNOWN;
  1107. }
  1108. } break;
  1109. case LLM_ARCH_CHATGLM:
  1110. {
  1111. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1112. switch (hparams.n_layer) {
  1113. case 28: {
  1114. if (hparams.n_head(0) == 16) {
  1115. type = LLM_TYPE_1_5B;
  1116. } else {
  1117. type = LLM_TYPE_6B;
  1118. }
  1119. } break;
  1120. case 40: {
  1121. if (hparams.n_head(0) == 24) {
  1122. type = LLM_TYPE_4B;
  1123. } else {
  1124. type = LLM_TYPE_9B;
  1125. }
  1126. } break;
  1127. default: type = LLM_TYPE_UNKNOWN;
  1128. }
  1129. } break;
  1130. case LLM_ARCH_GLM4:
  1131. {
  1132. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1133. switch (hparams.n_layer) {
  1134. case 40: type = LLM_TYPE_9B; break;
  1135. case 61: type = LLM_TYPE_32B; break;
  1136. default: type = LLM_TYPE_UNKNOWN;
  1137. }
  1138. } break;
  1139. case LLM_ARCH_BITNET:
  1140. {
  1141. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1142. switch (hparams.n_layer) {
  1143. case 26: type = LLM_TYPE_3B; break;
  1144. default: type = LLM_TYPE_UNKNOWN;
  1145. }
  1146. } break;
  1147. case LLM_ARCH_T5:
  1148. {
  1149. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1150. ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
  1151. uint32_t dec_start_token_id;
  1152. if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) {
  1153. hparams.dec_start_token_id = dec_start_token_id;
  1154. }
  1155. switch (hparams.n_layer) {
  1156. case 6: type = LLM_TYPE_60M; break; // t5-small
  1157. case 8: type = LLM_TYPE_80M; break; // flan-t5-small
  1158. case 12:
  1159. switch (hparams.n_ff()) {
  1160. case 3072: type = LLM_TYPE_220M; break; // t5-base
  1161. case 2048: type = LLM_TYPE_250M; break; // flan-t5-base
  1162. default: type = LLM_TYPE_UNKNOWN;
  1163. } break;
  1164. case 24:
  1165. switch (hparams.n_ff()) {
  1166. case 4096: type = LLM_TYPE_770M; break; // t5-large
  1167. case 2816: type = LLM_TYPE_780M; break; // flan-t5-large
  1168. case 16384: type = LLM_TYPE_3B; break; // t5-3b
  1169. case 5120: type = LLM_TYPE_3B; break; // flan-t5-xl
  1170. case 65536: type = LLM_TYPE_11B; break; // t5-11b
  1171. case 10240: type = LLM_TYPE_11B; break; // flan-t5-xxl
  1172. default: type = LLM_TYPE_UNKNOWN;
  1173. } break;
  1174. default: type = LLM_TYPE_UNKNOWN;
  1175. }
  1176. } break;
  1177. case LLM_ARCH_T5ENCODER:
  1178. {
  1179. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1180. ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
  1181. type = LLM_TYPE_UNKNOWN;
  1182. } break;
  1183. case LLM_ARCH_JAIS:
  1184. {
  1185. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  1186. ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
  1187. switch (hparams.n_layer) {
  1188. case 24: type = LLM_TYPE_1_3B; break;
  1189. case 40: type = LLM_TYPE_13B; break;
  1190. /* TODO: add variants */
  1191. default: type = LLM_TYPE_UNKNOWN;
  1192. }
  1193. } break;
  1194. case LLM_ARCH_NEMOTRON:
  1195. {
  1196. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  1197. switch (hparams.n_layer) {
  1198. case 32: type = LLM_TYPE_4B; break;
  1199. default: type = LLM_TYPE_UNKNOWN;
  1200. }
  1201. } break;
  1202. case LLM_ARCH_EXAONE:
  1203. {
  1204. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1205. switch (hparams.n_layer) {
  1206. case 32: type = LLM_TYPE_8B; break;
  1207. default: type = LLM_TYPE_UNKNOWN;
  1208. }
  1209. } break;
  1210. case LLM_ARCH_RWKV6:
  1211. case LLM_ARCH_RWKV6QWEN2:
  1212. {
  1213. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps, false);
  1214. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false);
  1215. ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);
  1216. ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
  1217. ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
  1218. ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false);
  1219. ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false);
  1220. switch (hparams.n_layer) {
  1221. case 24: type = LLM_TYPE_1_6B; break;
  1222. case 32:
  1223. switch (hparams.n_embd) {
  1224. case 2560: type = LLM_TYPE_3B; break;
  1225. case 4096: type = LLM_TYPE_7B; break;
  1226. default: type = LLM_TYPE_UNKNOWN;
  1227. } break;
  1228. case 61: type = LLM_TYPE_14B; break;
  1229. case 64: type = LLM_TYPE_32B; break;
  1230. default: type = LLM_TYPE_UNKNOWN;
  1231. }
  1232. } break;
  1233. case LLM_ARCH_RWKV7:
  1234. case LLM_ARCH_ARWKV7:
  1235. {
  1236. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps, false);
  1237. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false);
  1238. ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);
  1239. ml.get_key(LLM_KV_ATTENTION_DECAY_LORA_RANK, hparams.n_lora_decay);
  1240. ml.get_key(LLM_KV_ATTENTION_ICLR_LORA_RANK, hparams.n_lora_iclr);
  1241. ml.get_key(LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, hparams.n_lora_value_res_mix);
  1242. ml.get_key(LLM_KV_ATTENTION_GATE_LORA_RANK, hparams.n_lora_gate, false);
  1243. ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false);
  1244. switch (hparams.n_layer) {
  1245. case 12: type = LLM_TYPE_190M; break;
  1246. case 24:
  1247. switch (hparams.n_embd) {
  1248. case 1024: type = LLM_TYPE_450M; break;
  1249. case 2048: type = LLM_TYPE_1_5B; break;
  1250. default: type = LLM_TYPE_UNKNOWN;
  1251. } break;
  1252. case 28:
  1253. switch (hparams.n_embd) {
  1254. case 1536: type = LLM_TYPE_1_5B; break;
  1255. case 3584: type = LLM_TYPE_7B; break;
  1256. default: type = LLM_TYPE_UNKNOWN;
  1257. } break;
  1258. case 32: type = LLM_TYPE_2_9B; break; // RWKV-7-World
  1259. default: type = LLM_TYPE_UNKNOWN;
  1260. }
  1261. } break;
  1262. case LLM_ARCH_GRANITE:
  1263. case LLM_ARCH_GRANITE_MOE:
  1264. {
  1265. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1266. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  1267. ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
  1268. ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
  1269. ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);
  1270. switch (hparams.n_layer) {
  1271. case 32: type = LLM_TYPE_3B; break;
  1272. case 40: type = LLM_TYPE_3B; break;
  1273. // Add additional layer/vocab/etc checks here for other model sizes
  1274. default: type = LLM_TYPE_UNKNOWN;
  1275. }
  1276. // For Granite MoE Shared
  1277. ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, /* required */ false);
  1278. } break;
  1279. case LLM_ARCH_CHAMELEON:
  1280. {
  1281. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1282. hparams.f_norm_eps = 1e-5; // eps for qk-norm, torch default
  1283. ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm);
  1284. switch (hparams.n_layer) {
  1285. case 32: type = LLM_TYPE_7B; break;
  1286. case 48: type = LLM_TYPE_34B; break;
  1287. default: type = LLM_TYPE_UNKNOWN;
  1288. }
  1289. } break;
  1290. case LLM_ARCH_WAVTOKENIZER_DEC:
  1291. {
  1292. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  1293. ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS, hparams.f_norm_group_eps);
  1294. ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups);
  1295. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  1296. } break;
  1297. case LLM_ARCH_BAILINGMOE:
  1298. {
  1299. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1300. ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
  1301. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  1302. ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
  1303. ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
  1304. ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
  1305. switch (hparams.n_layer) {
  1306. case 28: type = LLM_TYPE_16B; break;
  1307. case 88: type = LLM_TYPE_290B; break;
  1308. default: type = LLM_TYPE_UNKNOWN;
  1309. }
  1310. } break;
        case LLM_ARCH_DOTS1:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);

                switch (hparams.n_layer) {
                    case 62: type = LLM_TYPE_142B; break;
                    default: type = LLM_TYPE_UNKNOWN;
                }
            } break;
        default: throw std::runtime_error("unsupported model architecture");
    }

    pimpl->n_bytes = ml.n_bytes;

    pimpl->desc_str = arch_name() + " " + type_name() + " " + ml.ftype_name();

    if (hparams.f_max_alibi_bias > 0.0f) {
        hparams.use_alibi = true;
    }

    hparams.rope_type = llama_model_rope_type(this);
}
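
// the vocabulary (tokenizer metadata) is loaded separately from the hyperparameters,
// using the architecture-specific KV prefix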
void llama_model::load_vocab(llama_model_loader & ml) {
    const auto kv = LLM_KV(arch);

    vocab.load(ml, kv);
}
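
// create all weight tensors: assign layers to devices according to n_gpu_layers and the
// tensor split, pick a buffer type for each tensor, and create the tensor metadata in a
// ggml context that matches that buffer type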
bool llama_model::load_tensors(llama_model_loader & ml) {
    const auto & split_mode   = params.split_mode;
    const auto & n_gpu_layers = params.n_gpu_layers;
    const auto & use_mlock    = params.use_mlock;
    const auto & tensor_split = params.tensor_split;

    const int n_layer = hparams.n_layer;

    const bool use_mmap_buffer = true;

    LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");

    // build a list of buffer types for the CPU and GPU devices
    pimpl->cpu_buft_list = make_cpu_buft_list(devices);
    for (auto * dev : devices) {
        buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split);
        // add CPU buffer types as a fallback
        buft_list.insert(buft_list.end(), pimpl->cpu_buft_list.begin(), pimpl->cpu_buft_list.end());
        pimpl->gpu_buft_list.emplace(dev, std::move(buft_list));
    }

    // calculate the split points
    bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + n_devices(), [](float x) { return x == 0.0f; });
    std::vector<float> splits(n_devices());
    if (all_zero) {
        // default split, by free memory
        for (size_t i = 0; i < n_devices(); ++i) {
            ggml_backend_dev_t dev = devices[i];
            size_t total;
            size_t free;
            ggml_backend_dev_memory(dev, &free, &total);
            splits[i] = free;
        }
    } else {
        std::copy(tensor_split, tensor_split + n_devices(), splits.begin());
    }

    // sum and normalize the splits to get the split points
    float split_sum = 0.0f;
    for (size_t i = 0; i < n_devices(); ++i) {
        split_sum += splits[i];
        splits[i] = split_sum;
    }
    for (size_t i = 0; i < n_devices(); ++i) {
        splits[i] /= split_sum;
    }

    ggml_backend_dev_t cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
    if (cpu_dev == nullptr) {
        throw std::runtime_error(format("%s: no CPU backend found", __func__));
    }
    const int i_gpu_start = std::max((int) hparams.n_layer - n_gpu_layers, (int) 0);
    const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, (int) n_layer + 1);
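
    // map a layer index to the device (and buffer type list) that should hold it:
    // layers below i_gpu_start stay on the CPU, the rest are spread across the GPU
    // devices according to the cumulative split fractions computed above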
    auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev {
        const bool is_swa = il < (int) hparams.n_layer && hparams.is_swa(il);
        if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) {
            LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(cpu_dev), is_swa);
            return {cpu_dev, &pimpl->cpu_buft_list};
        }
        const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin();
        auto * dev = devices.at(layer_gpu);
        LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(dev), is_swa);
        return {dev, &pimpl->gpu_buft_list.at(dev)};
    };

    // assign the input layer
    // there is very little benefit to offloading the input layer, so always keep it on the CPU
    pimpl->dev_input = { cpu_dev, &pimpl->cpu_buft_list };

    // assign the repeating layers to the devices according to the splits
    pimpl->dev_layer.resize(n_layer);
    for (int il = 0; il < n_layer; ++il) {
        pimpl->dev_layer[il] = get_layer_buft_list(il);
    }

    // assign the output layer
    pimpl->dev_output = get_layer_buft_list(n_layer);

    // one ggml context per buffer type
    int max_n_tensors = ml.n_tensors;
    max_n_tensors += 1;         // duplicated output tensor
    max_n_tensors += n_layer*2; // duplicated rope freq tensors
    const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors;

    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
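
    // lazily create one ggml context per buffer type; the contexts are no_alloc,
    // so they only hold tensor metadata, not the tensor data itself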
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
        auto it = ctx_map.find(buft);
        if (it == ctx_map.end()) {
            ggml_init_params params = {
                /*.mem_size   =*/ ctx_size,
                /*.mem_buffer =*/ NULL,
                /*.no_alloc   =*/ true,
            };

            ggml_context * ctx = ggml_init(params);
            if (!ctx) {
                throw std::runtime_error(format("failed to create ggml context"));
            }

            ctx_map[buft] = ctx;
            pimpl->ctxs.emplace_back(ctx);

            return ctx;
        }
        return it->second;
    };

    const auto TENSOR_DUPLICATED   = llama_model_loader::TENSOR_DUPLICATED;
    const auto TENSOR_NOT_REQUIRED = llama_model_loader::TENSOR_NOT_REQUIRED;

    // create tensors for the weights
    {
        // note: cast to int64_t since we will use these for the tensor dimensions
        const int64_t n_head        = hparams.n_head();
        const int64_t n_head_kv     = hparams.n_head_kv();
        const int64_t n_embd        = hparams.n_embd;
        const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa();
        const int64_t n_embd_v_gqa  = hparams.n_embd_v_gqa();
        const int64_t n_embd_head_k = hparams.n_embd_head_k;
        const int64_t n_embd_head_v = hparams.n_embd_head_v;
        const int64_t n_ff          = hparams.n_ff();
        const int64_t n_embd_gqa    = n_embd_v_gqa;
        const int64_t n_vocab       = vocab.n_tokens();
        const int64_t n_token_types = vocab.n_token_types();
        const int64_t n_rot         = hparams.n_rot;
        const int64_t n_expert      = hparams.n_expert;
        const int64_t n_expert_used = hparams.n_expert_used;
        const int64_t n_ctx_train   = hparams.n_ctx_train;

        if (n_expert > 0 && hparams.n_expert_used == 0) {
            throw std::runtime_error("model has expert layers but no expert layers are used");
        }

        int n_moved_tensors = 0;
        ggml_tensor * first_moved_tensor = nullptr;
        ggml_backend_buffer_type_t first_moved_from_buft = nullptr;
        ggml_backend_buffer_type_t first_moved_to_buft   = nullptr;
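
        // helper that resolves a tensor name to its metadata, selects a compatible
        // buffer type (honoring user overrides and the layer -> device assignment),
        // and creates the tensor in the ggml context matching that buffer type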
        auto create_tensor = [&](const LLM_TN_IMPL & tn, const std::initializer_list<int64_t> & ne, int flags) -> ggml_tensor * {
            ggml_tensor * t_meta = ml.get_tensor_meta(tn.str().c_str());

            if (!t_meta) {
                if (flags & TENSOR_NOT_REQUIRED) {
                    return nullptr;
                }
                throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str()));
            }

            // some models use the token embedding tensor as the output, but since these are used in different layers and with different ops
            // the tensor is duplicated
            // to handle this, we check if the tensor is duplicated, and if so, we assume that it is being loaded as the output tensor
            llm_tensor tn_tensor = tn.tensor;
            if (tn.tensor == LLM_TENSOR_TOKEN_EMBD && flags & TENSOR_DUPLICATED) {
                tn_tensor = LLM_TENSOR_OUTPUT;
            }

            llm_tensor_info info;
            try {
                info = llm_tensor_info_for(tn_tensor);
            } catch (const std::out_of_range & e) {
                throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str()));
            }

            // skip unused tensors
            if (info.op == GGML_OP_NONE) {
                const size_t nbytes = ggml_nbytes(t_meta);
                LLAMA_LOG_WARN("model has unused tensor %s (size = %zu bytes) -- ignoring\n", tn.str().c_str(), nbytes);

                ml.size_data -= nbytes;
                ml.n_created++;

                return nullptr;
            }

            // tensors with "bias" suffix are always used with GGML_OP_ADD
            ggml_op op;
            bool bias = tn.suffix != nullptr && strcmp(tn.suffix, "bias") == 0;
            if (bias) {
                op = GGML_OP_ADD;
            } else {
                op = info.op;
            }

            // sanity checks
            if (info.layer == LLM_TENSOR_LAYER_INPUT || info.layer == LLM_TENSOR_LAYER_OUTPUT) {
                if (tn.bid != -1) {
                    GGML_ABORT("input/output layer tensor %s used with a layer number", tn.str().c_str());
                }
            } else {
                if (tn.bid == -1) {
                    GGML_ABORT("repeating layer tensor %s used without a layer number", tn.str().c_str());
                }
            }

            // select the buffer type for this tensor
            buft_list_t * buft_list;
            switch (info.layer) {
                case LLM_TENSOR_LAYER_INPUT:
                    buft_list = pimpl->dev_input.buft_list;
                    break;
                case LLM_TENSOR_LAYER_OUTPUT:
                    buft_list = pimpl->dev_output.buft_list;
                    break;
                case LLM_TENSOR_LAYER_REPEATING:
                    buft_list = pimpl->dev_layer.at(tn.bid).buft_list;
                    break;
                default:
                    GGML_ABORT("invalid layer %d for tensor %s", info.layer, tn.str().c_str());
            }

            ggml_backend_buffer_type_t buft = nullptr;

            // check overrides
            if (ml.tensor_buft_overrides) {
                std::string tensor_name = tn.str();
                for (const auto * overrides = ml.tensor_buft_overrides; overrides->pattern != nullptr; ++overrides) {
                    std::regex pattern(overrides->pattern);
                    if (std::regex_search(tensor_name, pattern)) {
                        buft = overrides->buft;
                        LLAMA_LOG_DEBUG("tensor %s (%zu MiB %s) buffer type overridden to %s\n",
                                tensor_name.c_str(),
                                ggml_nbytes(t_meta) / 1024 / 1024, ggml_type_name(t_meta->type),
                                ggml_backend_buft_name(buft));
                        break;
                    }
                }
            }

            if (!buft) {
                buft = select_weight_buft(hparams, t_meta, op, *buft_list);
                if (!buft) {
                    throw std::runtime_error(format("failed to find a compatible buffer type for tensor %s", tn.str().c_str()));
                }
            }

            // avoid using a host buffer when using mmap
            auto * buft_dev = ggml_backend_buft_get_device(buft);
            if (ml.use_mmap && buft_dev && buft == ggml_backend_dev_host_buffer_type(buft_dev)) {
                auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
                if (!cpu_dev) {
                    throw std::runtime_error("no CPU backend found");
                }
                buft = ggml_backend_dev_buffer_type(cpu_dev);
            }
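
            // record tensors that end up in a buffer type other than the first
            // choice for their layer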
            if (buft != buft_list->front().second) {
                n_moved_tensors++;
                if (!first_moved_tensor) {
                    first_moved_tensor    = t_meta;
                    first_moved_from_buft = buft_list->front().second;
                    first_moved_to_buft   = buft;
                }
            }

            ggml_context * ctx = ctx_for_buft(buft);

            // if duplicated, check if the original tensor was allocated in the same buffer type context and avoid creating a new one
            if (flags & TENSOR_DUPLICATED) {
                ggml_tensor * t = ggml_get_tensor(ctx, tn.str().c_str());
                if (t) {
                    return t;
                }
            }
            return ml.create_tensor(ctx, tn, ne, flags);
        };

        layers.resize(n_layer);

        // TODO: move to a separate function
        const auto tn = LLM_TN(arch);
        switch (arch) {
            case LLM_ARCH_LLAMA:
            case LLM_ARCH_REFACT:
            case LLM_ARCH_MINICPM:
            case LLM_ARCH_GRANITE:
            case LLM_ARCH_GRANITE_MOE:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                    // if output is NULL, init from the input tok embed
                    if (output == NULL) {
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);

                        // optional bias tensors
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        }
                        else {
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        }
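
                        // dense models have a single FFN per layer; MoE models instead load
                        // the router (ffn_gate_inp) plus per-expert gate/down/up weights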
                        if (n_expert == 0) {
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);

                            // optional MLP bias
                            layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff},   TENSOR_NOT_REQUIRED);
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff},   TENSOR_NOT_REQUIRED);
                        } else {
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED);
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff, n_expert}, 0);

                            // For Granite MoE Shared
                            if (hparams.n_ff_shexp > 0) {
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
                            }
                        }
                    }
                } break;
            case LLM_ARCH_LLAMA4:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                    // if output is NULL, init from the input tok embed
                    if (output == NULL) {
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                    }

                    GGML_ASSERT(hparams.n_moe_layer_step > 0 && "Llama 4 requires n_moe_layer_step > 0");
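
                    // every n_moe_layer_step-th layer is a MoE layer; the remaining layers use a dense FFN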
                    for (int i = 0; i < n_layer; ++i) {
                        bool is_moe_layer = (i + 1) % hparams.n_moe_layer_step == 0;

                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));

                        if (is_moe_layer) {
                            int n_ff_exp = hparams.n_ff_exp;

                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff_exp, n_expert}, 0);

                            // Shared expert
                            const int64_t n_ff_shexp = n_ff_exp;
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_shexp}, 0);
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0);
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_shexp}, 0);
                        } else {
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
                        }
                    }
                } break;
            case LLM_ARCH_DECI:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                    // if output is NULL, init from the input tok embed
                    if (output == NULL) {
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];
                        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(i);
                        const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(i);
                        const int64_t n_embd_gqa   = hparams.n_embd_v_gqa(i);
                        const int64_t n_ff         = hparams.n_ff(i);
                        const int64_t n_head       = hparams.n_head(i);
                        const int64_t n_head_kv    = hparams.n_head_kv(i);
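
                        // Deci models vary per layer: attention can be replaced by a plain
                        // linear projection (n_head_kv == 0) and the FFN can be absent (n_ff == 0),
                        // so the corresponding tensors are only created when present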
                        if (n_head_kv == 0 && n_head > 0) {
                            // linear attention for DeciLMCausalModel
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
                        }
                        else if (n_head_kv > 0) {
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
                        }

                        // optional bias tensors
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);

                        if (n_ff > 0) {
                            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
                        }

                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        }
                        else {
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        }

                        if (n_ff > 0) {
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
                        }

                        // optional MLP bias
                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff},   TENSOR_NOT_REQUIRED);
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff},   TENSOR_NOT_REQUIRED);
                    }
                } break;
  1722. case LLM_ARCH_MINICPM3:
  1723. {
  1724. const int64_t n_embd_head_qk_rope = hparams.n_rot;
  1725. const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  1726. const int64_t q_lora_rank = hparams.n_lora_q;
  1727. const int64_t kv_lora_rank = hparams.n_lora_kv;
  1728. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1729. // output
  1730. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1731. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1732. // if output is NULL, init from the input tok embed
  1733. if (output == NULL) {
  1734. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  1735. }
  1736. for (int i = 0; i < n_layer; ++i) {
  1737. auto & layer = layers[i];
  1738. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1739. layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
  1740. layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
  1741. layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
  1742. layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0);
  1743. layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
  1744. layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
  1745. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_head * ( n_embd_head_v), n_embd}, 0);
  1746. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  1747. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  1748. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  1749. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1750. layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
  1751. layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
  1752. }
  1753. } break;
  1754. case LLM_ARCH_GROK:
  1755. {
  1756. if (n_expert == 0) {
  1757. throw std::runtime_error("Grok model cannot have zero experts");
  1758. }
  1759. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1760. // output
  1761. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1762. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1763. // if output is NULL, init from the input tok embed
  1764. if (output == NULL) {
  1765. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  1766. }
  1767. for (int i = 0; i < n_layer; ++i) {
  1768. auto & layer = layers[i];
  1769. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1770. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  1771. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  1772. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  1773. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1774. layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
  1775. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  1776. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  1777. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED);
  1778. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
  1779. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  1780. layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
  1781. }
  1782. } break;
  1783. case LLM_ARCH_DBRX:
  1784. {
  1785. if (n_expert == 0) {
  1786. throw std::runtime_error("DBRX model cannot have zero experts");
  1787. }
  1788. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1789. // output
  1790. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1791. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  1792. for (int i = 0; i < n_layer; ++i) {
  1793. auto & layer = layers[i];
  1794. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1795. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  1796. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1797. layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
  1798. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  1799. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  1800. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
  1801. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  1802. }
  1803. } break;
  1804. case LLM_ARCH_BAICHUAN:
  1805. {
  1806. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1807. {
  1808. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1809. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  1810. }
  1811. for (int i = 0; i < n_layer; ++i) {
  1812. auto & layer = layers[i];
  1813. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1814. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  1815. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  1816. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  1817. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1818. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  1819. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  1820. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  1821. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1822. }
  1823. } break;
  1824. case LLM_ARCH_FALCON:
  1825. {
  1826. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1827. // output
  1828. {
  1829. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1830. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  1831. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1832. if (!output) {
  1833. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU
  1834. }
  1835. }
  1836. for (int i = 0; i < n_layer; ++i) {
  1837. auto & layer = layers[i];
  1838. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1839. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  1840. layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1841. layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1842. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  1843. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1844. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  1845. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1846. }
  1847. } break;
  1848. case LLM_ARCH_STARCODER:
  1849. {
  1850. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1851. pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0);
  1852. // output
  1853. {
  1854. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1855. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  1856. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1857. if (!output) {
  1858. // needs to be on GPU
  1859. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  1860. }
  1861. }
  1862. for (int i = 0; i < n_layer; ++i) {
  1863. auto & layer = layers[i];
  1864. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1865. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  1866. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  1867. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
  1868. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1869. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  1870. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  1871. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  1872. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  1873. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  1874. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1875. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  1876. }
  1877. } break;
  1878. case LLM_ARCH_BERT:
  1879. case LLM_ARCH_NOMIC_BERT:
  1880. case LLM_ARCH_NOMIC_BERT_MOE:
  1881. {
  1882. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1883. type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, TENSOR_NOT_REQUIRED);
  1884. if (arch == LLM_ARCH_BERT) {
  1885. pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0);
  1886. cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
  1887. cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {n_embd}, TENSOR_NOT_REQUIRED);
  1888. cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
  1889. cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
  1890. }
  1891. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
  1892. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
  1893. for (int i = 0; i < n_layer; ++i) {
  1894. auto & layer = layers[i];
  1895. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  1896. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  1897. if (!layer.wqkv) {
  1898. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  1899. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
  1900. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  1901. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
  1902. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  1903. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
  1904. }
  1905. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1906. layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
  1907. layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd}, 0);
  1908. if (hparams.moe_every_n_layers > 0 && i % hparams.moe_every_n_layers == 1) {
  1909. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  1910. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff, n_expert}, 0);
  1911. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
  1912. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  1913. } else {
  1914. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1915. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  1916. if (arch == LLM_ARCH_BERT || arch == LLM_ARCH_NOMIC_BERT_MOE) {
  1917. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  1918. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  1919. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  1920. } else {
  1921. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  1922. }
  1923. }
  1924. layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
  1925. layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0);
  1926. }
  1927. } break;
  1928. case LLM_ARCH_JINA_BERT_V2:
  1929. {
  1930. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); // word_embeddings
  1931. type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0); // token_type_embeddings
  1932. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); // LayerNorm
  1933. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0); //LayerNorm bias
  1934. cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED);
  1935. cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {1}, TENSOR_NOT_REQUIRED);
  1936. for (int i = 0; i < n_layer; ++i) {
  1937. auto & layer = layers[i]; // JinaBertLayer
  1938. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  1939. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
  1940. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1941. layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1942. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  1943. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
  1944. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1945. layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1946. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  1947. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
  1948. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); //output_dens
  1949. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); //output_dens
  1950. layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); //output_norm
  1951. layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd}, 0);
  1952. layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1953. layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1954. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
  1955. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, layer.ffn_gate ? n_ff : n_ff * 2}, 0);
  1956. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  1957. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  1958. layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
  1959. layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0);
  1960. }
  1961. } break;
  1962. case LLM_ARCH_BLOOM:
  1963. {
  1964. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1965. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
  1966. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
  1967. // output
  1968. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1969. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  1970. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1971. // if output is NULL, init from the input tok embed
  1972. if (output == NULL) {
  1973. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  1974. }
  1975. for (int i = 0; i < n_layer; ++i) {
  1976. auto & layer = layers[i];
  1977. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1978. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  1979. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  1980. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
  1981. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1982. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  1983. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  1984. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  1985. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  1986. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  1987. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1988. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  1989. }
  1990. } break;
  1991. case LLM_ARCH_MPT:
  1992. {
  1993. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1994. pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, TENSOR_NOT_REQUIRED);
  1995. // output
  1996. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1997. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, TENSOR_NOT_REQUIRED);
  1998. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1999. if (!output) {
  2000. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU
  2001. }
  2002. for (int i = 0; i < n_layer; ++i) {
  2003. auto & layer = layers[i];
  2004. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2005. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2006. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  2007. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2008. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2009. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2010. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2011. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2012. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2013. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2014. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2015. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
  2016. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2017. layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2018. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2019. layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2020. // AWQ ScaleActivation layer
  2021. layer.ffn_act = create_tensor(tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, TENSOR_NOT_REQUIRED);
  2022. }
  2023. } break;
  2024. case LLM_ARCH_STABLELM:
  2025. {
  2026. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2027. // output
  2028. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2029. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2030. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2031. for (int i = 0; i < n_layer; ++i) {
  2032. auto & layer = layers[i];
  2033. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2034. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2035. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2036. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2037. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2038. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2039. // optional bias tensors, present in Stable LM 2 1.6B
  2040. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2041. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2042. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2043. // optional q and k layernorms, present in StableLM 2 12B
  2044. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, TENSOR_NOT_REQUIRED);
  2045. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
  2046. // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
  2047. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2048. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2049. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2050. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2051. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2052. }
  2053. } break;
  2054. case LLM_ARCH_QWEN:
  2055. {
  2056. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2057. // output
  2058. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2059. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2060. for (int i = 0; i < n_layer; ++i) {
  2061. auto & layer = layers[i];
  2062. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2063. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3}, 0);
  2064. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3}, 0);
  2065. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2066. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2067. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2}, 0);
  2068. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd}, 0);
  2069. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2}, 0);
  2070. }
  2071. } break;
  2072. case LLM_ARCH_QWEN2:
  2073. case LLM_ARCH_QWEN2VL:
  2074. {
  2075. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2076. // output
  2077. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2078. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2079. // if output is NULL, init from the input tok embed
  2080. if (output == NULL) {
  2081. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2082. }
  2083. for (int i = 0; i < n_layer; ++i) {
  2084. auto & layer = layers[i];
  2085. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2086. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2087. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2088. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2089. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2090. // optional bias tensors
  2091. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
  2092. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
  2093. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
  2094. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2095. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2096. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2097. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2098. }
  2099. } break;
  2100. case LLM_ARCH_QWEN2MOE:
  2101. {
  2102. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2103. // output
  2104. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2105. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2106. for (int i = 0; i < n_layer; ++i) {
  2107. auto & layer = layers[i];
  2108. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2109. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2110. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2111. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2112. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2113. // optional bias tensors
  2114. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2115. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2116. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2117. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2118. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  2119. if (n_expert == 0) {
  2120. throw std::runtime_error("n_expert must be > 0 for QWEN2MOE");
  2121. }
  2122. if (n_expert_used == 0) {
  2123. throw std::runtime_error("n_expert_used must be > 0 for QWEN2MOE");
  2124. }
  2125. // MoE branch
  2126. const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
  2127. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2128. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
  2129. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2130. // Shared expert branch
  2131. const int64_t n_ff_shexp = hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff;
  2132. layer.ffn_gate_inp_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd}, 0);
  2133. layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, n_ff_shexp}, 0);
  2134. layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0);
  2135. layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, n_ff_shexp}, 0);
  2136. }
  2137. } break;
  2138. case LLM_ARCH_QWEN3:
  2139. {
  2140. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2141. // output
  2142. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2143. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2144. // if output is NULL, init from the input tok embed
  2145. if (output == NULL) {
  2146. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2147. }
  2148. for (int i = 0; i < n_layer; ++i) {
  2149. auto & layer = layers[i];
  2150. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2151. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2152. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2153. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2154. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  2155. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
  2156. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
  2157. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2158. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2159. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2160. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2161. }
  2162. } break;
case LLM_ARCH_QWEN3MOE:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
            if (n_expert == 0) {
                throw std::runtime_error("n_expert must be > 0 for QWEN3MOE");
            }
            if (n_expert_used == 0) {
                throw std::runtime_error("n_expert_used must be > 0 for QWEN3MOE");
            }
            // MoE branch
            const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
            layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
        }
    } break;
case LLM_ARCH_PHI2:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
            layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
            if (layer.wqkv == nullptr) {
                layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
                layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
                layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
                layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
                layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
                layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
            }
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
        }
    } break;
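// Phi-3: the gate and up projections are fused into one ffn_up tensor of width 2*n_ff; the
// per-layer long/short RoPE factor tensors are optional and marked duplicated for i > 0.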
case LLM_ARCH_PHI3:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }, 0);
            layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
        }
    } break;
case LLM_ARCH_PHIMOE:
    {
        const int64_t n_embd_head = n_embd / n_head;
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, 0);
        output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), { n_vocab }, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
            layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), { n_embd }, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED);
            if (layer.wqkv == nullptr) {
                layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
                layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
                layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
                layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
                layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
                layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
            }
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
            layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), { n_embd }, 0);
            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
            layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
            layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
        }
    } break;
case LLM_ARCH_PLAMO:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
case LLM_ARCH_GPT2:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
            layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
        }
    } break;
case LLM_ARCH_CODESHELL:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if tok embd is NULL, init from output
        if (tok_embd == NULL) {
            tok_embd = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
            layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
        }
    } break;
case LLM_ARCH_ORION:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
case LLM_ARCH_INTERNLM2:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            // layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
case LLM_ARCH_GEMMA:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
        }
    } break;
case LLM_ARCH_GEMMA2:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
            layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
        }
    } break;
case LLM_ARCH_GEMMA3:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
            layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
        }
    } break;
case LLM_ARCH_STARCODER2:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            // optional bias tensors
            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            // optional bias tensors
            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
            layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
        }
    } break;
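// Mamba: per-layer SSM weights (input/output projections, depthwise conv1d, x/dt projections
// and the A/D state tensors); only an expansion factor of 2 (d_inner == 2*n_embd) is supported.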
case LLM_ARCH_MAMBA:
    {
        const int64_t d_conv = hparams.ssm_d_conv;
        const int64_t d_inner = hparams.ssm_d_inner;
        const int64_t d_state = hparams.ssm_d_state;
        const int64_t dt_rank = hparams.ssm_dt_rank;
        // only an expansion factor of 2 is supported for now
        if (2 * n_embd != d_inner) {
            throw std::runtime_error("only an expansion factor of 2 is supported for now");
        }
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed, duplicated to allow offloading
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            // norm
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0);
            layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0);
            layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0);
            layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0);
            layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0);
            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0);
            // no "weight" suffix for these
            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0);
            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0);
            // out_proj
            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
        }
    } break;
case LLM_ARCH_XVERSE:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
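// Command-R: per-head Q/K norms are only loaded for the larger (>= 64 layer) variants.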
case LLM_ARCH_COMMAND_R:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        // init output from the input tok embed
        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            if (n_layer >= 64) {
                layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
                layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
            }
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
case LLM_ARCH_COHERE2:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
        // init output from the input tok embed
        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd }, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
        }
    } break;
case LLM_ARCH_OLMO: // adapted from LLM_ARCH_LLAMA with norm params removed
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
case LLM_ARCH_OLMO2:
    {
        const int64_t n_embd_head = n_embd / n_head;
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_head_kv * n_embd_head}, 0);
            layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
        }
    } break;
case LLM_ARCH_OLMOE:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
            if (n_expert == 0) {
                throw std::runtime_error("n_expert must be > 0");
            }
            if (n_expert_used == 0) {
                throw std::runtime_error("n_expert_used must be > 0");
            }
            // MoE branch
            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
            layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
        }
    } break;
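// OpenELM: head count and FFN width vary per layer (hparams.n_head(i), hparams.n_ff(i)),
// so the fused QKV projection is sized individually for each layer.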
case LLM_ARCH_OPENELM:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        // init output from the input tok embed
        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        for (int i = 0; i < n_layer; ++i) {
            const int64_t n_head = hparams.n_head(i);
            const int64_t n_head_qkv = 2*hparams.n_head_kv(i) + n_head;
            const int64_t n_ff = hparams.n_ff(i);
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_head_qkv*n_embd_head_k}, 0);
            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head*n_embd_head_k, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
case LLM_ARCH_GPTNEOX:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
            layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
        }
    } break;
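// Arctic: each layer pairs a dense FFN with hidden size n_embd alongside a routed expert FFN
// of width n_ff (ffn_norm_exps + *_exps tensors).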
case LLM_ARCH_ARCTIC:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
            layer.ffn_norm_exps = create_tensor(tn(LLM_TENSOR_FFN_NORM_EXPS, "weight", i), {n_embd}, 0);
            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, false);
            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
            layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
        }
    } break;
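// DeepSeek: the first n_layer_dense_lead layers use a dense FFN; later layers use routed
// experts plus a shared-expert branch of width n_ff_exp * n_expert_shared.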
case LLM_ARCH_DEEPSEEK:
    {
        const int64_t n_ff_exp = hparams.n_ff_exp;
        const int64_t n_expert_shared = hparams.n_expert_shared;
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            if (i < (int) hparams.n_layer_dense_lead) {
                layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
                layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            } else {
                layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
                if (n_expert == 0) {
                    throw std::runtime_error("n_expert must be > 0");
                }
                if (n_expert_used == 0) {
                    throw std::runtime_error("n_expert_used must be > 0");
                }
                // MoE branch
                layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
                layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
                layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
                // Shared expert branch
                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_exp * n_expert_shared, n_embd}, 0);
                layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
            }
        }
    } break;
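// DeepSeek-V2: the lite variant (27 layers) has no low-rank Q projection; MLA checkpoints
// carry the split wk_b/wv_b tensors, while legacy GGUFs keep the fused wkv_b.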
case LLM_ARCH_DEEPSEEK2:
    {
        const bool is_lite = (hparams.n_layer == 27);
        const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);
        // note: these are the actual head sizes you get when treating as MHA or after "decompression" using wv_b for MLA
        const int64_t n_embd_head_k_mla = is_mla ? hparams.n_embd_head_k_mla : hparams.n_embd_head_k;
        const int64_t n_embd_head_v_mla = is_mla ? hparams.n_embd_head_v_mla : hparams.n_embd_head_v;
        const int64_t n_embd_head_qk_rope = hparams.n_rot;
        const int64_t n_embd_head_qk_nope = n_embd_head_k_mla - n_embd_head_qk_rope;
        const int64_t q_lora_rank = hparams.n_lora_q;
        const int64_t kv_lora_rank = hparams.n_lora_kv;
        const int64_t n_ff_exp = hparams.n_ff_exp;
        const int64_t n_expert_shared = hparams.n_expert_shared;
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            if (!is_lite) {
                layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
            }
            layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
            if (!is_lite) {
                layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
                layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k_mla}, 0);
            } else {
                layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_head * n_embd_head_k_mla}, 0);
            }
            layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + n_embd_head_qk_rope}, 0);
            // note: only old legacy GGUF files will have the unsplit wkv_b tensor in
            if (is_mla) {
                layer.wk_b = create_tensor(tn(LLM_TENSOR_ATTN_K_B, "weight", i), {n_embd_head_qk_nope, kv_lora_rank, n_head}, 0);
                layer.wv_b = create_tensor(tn(LLM_TENSOR_ATTN_V_B, "weight", i), {kv_lora_rank, n_embd_head_v_mla, n_head}, 0);
            } else {
                layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v_mla)}, 0);
            }
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_embd_head_v_mla, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            if (i < (int) hparams.n_layer_dense_lead) {
                layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
                layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            } else {
                layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
                layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
                if (n_expert == 0) {
                    throw std::runtime_error("n_expert must be > 0");
                }
                if (n_expert_used == 0) {
                    throw std::runtime_error("n_expert_used must be > 0");
                }
                // MoE branch
                layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
                layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
                layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
                // Shared expert branch
                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_exp * n_expert_shared, n_embd}, 0);
                layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
            }
        }
    } break;
case LLM_ARCH_PLM:
    {
        const int64_t n_embd_head_qk_rope = hparams.n_rot;
        const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
        const int64_t kv_lora_rank = hparams.n_lora_kv;
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        // output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
            layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
            layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
            layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * (n_embd_head_v), n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
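// BitNet: every quantized projection may carry an optional one-element "scale" tensor.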
case LLM_ARCH_BITNET:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_sub_norm = create_tensor(tn(LLM_TENSOR_ATTN_SUB_NORM, "weight", i), {n_embd}, 0);
            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
            layer.wq_scale = create_tensor(tn(LLM_TENSOR_ATTN_Q, "scale", i), {1}, TENSOR_NOT_REQUIRED);
            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wk_scale = create_tensor(tn(LLM_TENSOR_ATTN_K, "scale", i), {1}, TENSOR_NOT_REQUIRED);
            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
            layer.wv_scale = create_tensor(tn(LLM_TENSOR_ATTN_V, "scale", i), {1}, TENSOR_NOT_REQUIRED);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.wo_scale = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale", i), {1}, TENSOR_NOT_REQUIRED);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_sub_norm = create_tensor(tn(LLM_TENSOR_FFN_SUB_NORM, "weight", i), {n_ff}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_gate_scale = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale", i), {1}, TENSOR_NOT_REQUIRED);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_down_scale = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale", i), {1}, TENSOR_NOT_REQUIRED);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up_scale = create_tensor(tn(LLM_TENSOR_FFN_UP, "scale", i), {1}, TENSOR_NOT_REQUIRED);
        }
    } break;
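// T5: encoder (ENC_*) and decoder (DEC_*) tensor sets are loaded per layer, including the
// decoder cross-attention; the relative-attention-bias tensors are optional.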
case LLM_ARCH_T5:
    {
        const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm = create_tensor(tn(LLM_TENSOR_DEC_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
            layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
            layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
            layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
            layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_DEC_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_rel_b = create_tensor(tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
            layer.wq = create_tensor(tn(LLM_TENSOR_DEC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wk = create_tensor(tn(LLM_TENSOR_DEC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wv = create_tensor(tn(LLM_TENSOR_DEC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_DEC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
            layer.attn_norm_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_NORM, "weight", i), {n_embd}, 0);
            // this tensor seems to be unused in HF transformers implementation
            layer.attn_rel_b_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
            layer.wq_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wk_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wv_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
            layer.wo_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_DEC_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_DEC_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_DEC_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_DEC_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
case LLM_ARCH_T5ENCODER:
    {
        const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
            layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
            layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
            layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
            layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
            layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
        }
    } break;
case LLM_ARCH_JAIS:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
            layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
            layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
        }
    } break;
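// ChatGLM / GLM4: a fused QKV tensor is preferred, with a fallback to separate Q/K/V weights
// and optional biases; ffn_up packs the gate and up projections together (width n_ff * 2).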
case LLM_ARCH_CHATGLM:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
            layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
            if (layer.wqkv == nullptr) {
                layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
                layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
                layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
                layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
                layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
            }
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff * 2}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
        }
    } break;
case LLM_ARCH_GLM4:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed
        if (output == NULL) {
            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
        }
        for (int i = 0; i < n_layer; ++i) {
            auto & layer = layers[i];
            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
            layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
            if (layer.wqkv == nullptr) {
                layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
                layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
                layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
                layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
                layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
            }
            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
            layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff * 2}, 0);
            layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
        }
    } break;
case LLM_ARCH_NEMOTRON:
    {
        tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
        // output
        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3052. for (int i = 0; i < n_layer; ++i) {
  3053. auto & layer = layers[i];
  3054. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3055. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  3056. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  3057. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  3058. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  3059. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  3060. // optional bias tensors
  3061. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  3062. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  3063. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  3064. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  3065. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3066. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  3067. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3068. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3069. // optional MLP bias
  3070. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  3071. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
  3072. }
  3073. } break;
  3074. case LLM_ARCH_EXAONE:
  3075. {
  3076. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3077. // output
  3078. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3079. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  3080. // if output is NULL, init from the input tok embed
  3081. if (output == NULL) {
  3082. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  3083. }
  3084. for (int i = 0; i < n_layer; ++i) {
  3085. auto & layer = layers[i];
  3086. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3087. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  3088. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  3089. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  3090. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  3091. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3092. layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
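// optional rope frequency factors; layers after the first flag the tensor as duplicated so a shared tensor is only counted once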
  3093. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  3094. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3095. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3096. }
  3097. } break;
  3098. case LLM_ARCH_RWKV6:
  3099. {
  3100. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3101. // Block 0, LN0
  3102. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
  3103. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
  3104. // output
  3105. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3106. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  3107. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3108. const int time_mix_extra_dim = hparams.time_mix_extra_dim;
  3109. const int time_decay_extra_dim = hparams.time_decay_extra_dim;
  3110. const int head_size = hparams.wkv_head_size;
  3111. const int attn_hidden_size = n_embd;
  3112. const int ffn_size = hparams.n_ff_arr[0];
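// time_mix_extra_dim / time_decay_extra_dim are the inner ranks of the W1/W2 and DECAY_W1/W2 low-rank pairs below;
// ffn_size is taken from layer 0 and reused for every layer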
  3113. for (int i = 0; i < n_layer; ++i) {
  3114. auto & layer = layers[i];
  3115. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3116. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  3117. layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
  3118. layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, 0);
  3119. layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
  3120. layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
  3121. layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
  3122. layer.time_mix_lerp_w = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3123. layer.time_mix_lerp_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3124. layer.time_mix_lerp_v = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3125. layer.time_mix_lerp_r = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3126. layer.time_mix_lerp_g = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3127. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, TENSOR_NOT_REQUIRED);
  3128. GGML_ASSERT(!(layer.time_mix_lerp_fused == NULL && layer.time_mix_lerp_w == NULL));
  3129. layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, 0);
  3130. layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
  3131. layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
  3132. layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
  3133. layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
  3134. layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3135. layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3136. layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3137. layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
  3138. layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
  3139. layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
  3140. layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
  3141. layer.channel_mix_lerp_r = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, 0);
  3142. layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
  3143. layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
  3144. layer.channel_mix_receptance = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "weight", i), {n_embd, n_embd}, 0);
  3145. }
  3146. } break;
  3147. case LLM_ARCH_RWKV6QWEN2:
  3148. {
  3149. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3150. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3151. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, TENSOR_NOT_REQUIRED);
  3152. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3153. const int time_mix_extra_dim = hparams.time_mix_extra_dim;
  3154. const int time_decay_extra_dim = hparams.time_decay_extra_dim;
  3155. const int head_size = hparams.wkv_head_size;
  3156. const int attn_hidden_size = n_embd;
  3157. const int n_head_kv = hparams.n_head_kv();
  3158. int attn_key_value_size;
  3159. if (n_head_kv == 0 || attn_hidden_size / head_size == n_head_kv) {
  3160. attn_key_value_size = attn_hidden_size;
  3161. } else {
  3162. attn_key_value_size = n_head_kv * head_size;
  3163. }
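// the key/value projections span the full hidden size unless the model uses fewer KV heads than attention heads (GQA),
// in which case they are sized n_head_kv * head_size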
  3164. for (int i = 0; i < n_layer; ++i) {
  3165. auto & layer = layers[i];
  3166. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3167. layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
  3168. layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
  3169. layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
  3170. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
  3171. layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, TENSOR_NOT_REQUIRED);
  3172. layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
  3173. layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
  3174. layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
  3175. layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {n_embd, attn_key_value_size}, 0);
  3176. layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {n_embd, attn_key_value_size}, 0);
  3177. layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3178. layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3179. // optional bias tensors
  3180. layer.time_mix_key_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "bias", i), {attn_key_value_size}, TENSOR_NOT_REQUIRED);
  3181. layer.time_mix_value_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "bias", i), {attn_key_value_size}, TENSOR_NOT_REQUIRED);
  3182. layer.time_mix_receptance_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "bias", i), {attn_hidden_size}, TENSOR_NOT_REQUIRED);
  3183. layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
  3184. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3185. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  3186. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3187. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3188. }
  3189. } break;
  3190. case LLM_ARCH_RWKV7:
  3191. {
  3192. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3193. // Block 0, LN0
  3194. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
  3195. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
  3196. // output
  3197. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3198. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  3199. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3200. const int n_lora_decay = hparams.n_lora_decay;
  3201. const int n_lora_iclr = hparams.n_lora_iclr;
  3202. const int n_lora_value_res_mix = hparams.n_lora_value_res_mix;
  3203. const int n_lora_gate = hparams.n_lora_gate;
  3204. const int attn_hidden_size = n_embd;
  3205. const int ffn_size = hparams.n_ff_arr[0];
  3206. for (int i = 0; i < n_layer; ++i) {
  3207. auto & layer = layers[i];
  3208. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3209. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  3210. layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
  3211. layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, 0);
  3212. layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0);
  3213. layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0);
  3214. layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0);
  3215. layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0);
  3216. layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0);
  3217. layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0);
  3218. if (i == 0) {
// loaded for layer 0 but not used by the graph (the value-residual mix only applies to later layers)
  3220. layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
  3221. layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0);
  3222. layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0);
  3223. } else {
  3224. layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
  3225. layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0);
  3226. layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0);
  3227. }
  3228. layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, 0);
  3229. layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, 0);
  3230. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0);
  3231. layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0);
  3232. layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0);
  3233. layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0);
  3234. layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
  3235. layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3236. layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3237. layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
  3238. layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
  3239. layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
  3240. layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
  3241. layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
  3242. layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
  3243. }
  3244. } break;
  3245. case LLM_ARCH_ARWKV7:
  3246. {
  3247. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3248. // output
  3249. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3250. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3251. const int n_lora_decay = hparams.n_lora_decay;
  3252. const int n_lora_iclr = hparams.n_lora_iclr;
  3253. const int n_lora_value_res_mix = hparams.n_lora_value_res_mix;
  3254. const int n_lora_gate = hparams.n_lora_gate;
  3255. const int attn_hidden_size = n_embd;
  3256. for (int i = 0; i < n_layer; ++i) {
  3257. auto & layer = layers[i];
  3258. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3259. layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0);
  3260. layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0);
  3261. layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0);
  3262. layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0);
  3263. layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0);
  3264. layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0);
  3265. if (i == 0) {
// loaded for layer 0 but not used by the graph (the value-residual mix only applies to later layers)
  3267. layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
  3268. layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0);
  3269. layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0);
  3270. } else {
  3271. layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
  3272. layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0);
  3273. layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0);
  3274. }
  3275. layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, TENSOR_NOT_REQUIRED);
  3276. layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, TENSOR_NOT_REQUIRED);
  3277. try {
  3278. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0);
  3279. } catch(std::runtime_error & e) {
// ARWKV models without gate tensors store a 5-component fused lerp instead of 6, so fall back to the smaller shape
  3281. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
  3282. }
  3283. layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0);
  3284. layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0);
  3285. layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0);
  3286. layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
  3287. layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3288. layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3289. layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  3290. layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  3291. layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
  3292. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3293. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  3294. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3295. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3296. }
  3297. } break;
  3298. case LLM_ARCH_CHAMELEON:
  3299. {
  3300. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3301. // output
  3302. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3303. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  3304. // if output is NULL, init from the input tok embed
  3305. if (output == NULL) {
  3306. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  3307. }
  3308. for (int i = 0; i < n_layer; ++i) {
  3309. auto & layer = layers[i];
  3310. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3311. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
  3312. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
  3313. layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd_head_k, n_head}, TENSOR_NOT_REQUIRED);
  3314. layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
  3315. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  3316. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  3317. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  3318. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  3319. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3320. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  3321. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3322. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3323. }
  3324. } break;
  3325. case LLM_ARCH_WAVTOKENIZER_DEC:
  3326. {
  3327. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hparams.n_embd_features, n_vocab}, 0);
  3328. conv1d = create_tensor(tn(LLM_TENSOR_CONV1D, "weight"), {7, hparams.n_embd_features, hparams.posnet.n_embd}, 0);
  3329. conv1d_b = create_tensor(tn(LLM_TENSOR_CONV1D, "bias"), {1, hparams.posnet.n_embd}, 0);
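// decoder pipeline: token embedding features -> 1D convolution -> posnet blocks (resnet / attention / norm) -> convnext blocks -> linear output head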
  3330. // posnet
  3331. {
  3332. const int64_t n_embd = hparams.posnet.n_embd;
  3333. for (uint32_t i = 0; i < hparams.posnet.n_layer; ++i) {
  3334. auto & layer = layers[i].posnet;
  3335. // posnet:
  3336. //
  3337. // - resnet
  3338. // - resnet
  3339. // - attn
  3340. // - resnet
  3341. // - resnet
  3342. // - norm
  3343. //
  3344. switch (i) {
  3345. case 0:
  3346. case 1:
  3347. case 3:
  3348. case 4:
  3349. {
  3350. layer.norm1 = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "weight", i), {1, n_embd}, 0);
  3351. layer.norm1_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "bias", i), {1, n_embd}, 0);
  3352. layer.conv1 = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "weight", i), {3, n_embd, n_embd}, 0);
  3353. layer.conv1_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "bias", i), {1, n_embd}, 0);
  3354. layer.norm2 = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "weight", i), {1, n_embd}, 0);
  3355. layer.norm2_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "bias", i), {1, n_embd}, 0);
  3356. layer.conv2 = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "weight", i), {3, n_embd, n_embd}, 0);
  3357. layer.conv2_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "bias", i), {1, n_embd}, 0);
  3358. } break;
  3359. case 2:
  3360. {
  3361. layer.attn_norm = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
  3362. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias", i), {1, n_embd}, 0);
  3363. layer.attn_q = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q, "weight", i), {1, n_embd, n_embd}, 0);
  3364. layer.attn_q_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q, "bias", i), {1, n_embd}, 0);
  3365. layer.attn_k = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K, "weight", i), {1, n_embd, n_embd}, 0);
  3366. layer.attn_k_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K, "bias", i), {1, n_embd}, 0);
  3367. layer.attn_v = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V, "weight", i), {1, n_embd, n_embd}, 0);
  3368. layer.attn_v_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V, "bias", i), {1, n_embd}, 0);
  3369. layer.attn_o = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT, "weight", i), {1, n_embd, n_embd}, 0);
  3370. layer.attn_o_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT, "bias", i), {1, n_embd}, 0);
  3371. } break;
  3372. case 5:
  3373. {
  3374. layer.norm = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
  3375. layer.norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias", i), {1, n_embd}, 0);
  3376. } break;
  3377. default: GGML_ABORT("unknown posnet layer");
}
  3379. }
  3380. }
  3381. GGML_ASSERT(hparams.posnet.n_embd == hparams.convnext.n_embd);
  3382. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {hparams.posnet.n_embd}, 0);
  3383. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {hparams.posnet.n_embd}, 0);
  3384. // convnext
  3385. {
  3386. const int64_t n_embd = hparams.convnext.n_embd;
  3387. for (uint32_t i = 0; i < hparams.convnext.n_layer; ++i) {
  3388. auto & layer = layers[i].convnext;
  3389. layer.dw = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW, "weight", i), {7, 1, n_embd}, 0);
  3390. layer.dw_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW, "bias", i), {1, n_embd}, 0);
  3391. layer.norm = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM, "weight", i), {n_embd}, 0);
  3392. layer.norm_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM, "bias", i), {n_embd}, 0);
  3393. layer.pw1 = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1, "weight", i), {n_embd, n_ff}, 0);
  3394. layer.pw1_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1, "bias", i), {n_ff}, 0);
  3395. layer.pw2 = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2, "weight", i), {n_ff, n_embd}, 0);
  3396. layer.pw2_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2, "bias", i), {n_embd}, 0);
  3397. layer.gamma = create_tensor(tn(LLM_TENSOR_CONVNEXT_GAMMA, "weight", i), {n_embd}, 0);
  3398. }
  3399. // output
  3400. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3401. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  3402. }
  3403. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hparams.convnext.n_embd, n_embd}, 0);
  3404. output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), {n_embd}, 0);
  3405. } break;
  3406. case LLM_ARCH_BAILINGMOE:
  3407. {
  3408. const int64_t n_ff_exp = hparams.n_ff_exp;
  3409. const int64_t n_expert_shared = hparams.n_expert_shared;
  3410. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3411. // output
  3412. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3413. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3414. for (int i = 0; i < n_layer; ++i) {
  3415. auto & layer = layers[i];
  3416. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3417. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_head * n_rot}, 0);
  3418. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_head_kv * n_rot}, 0);
  3419. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_head_kv * n_rot}, 0);
  3420. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_rot, n_embd}, 0);
  3421. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3422. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  3423. if (n_expert == 0) {
  3424. throw std::runtime_error("n_expert must be > 0");
  3425. }
  3426. if (n_expert_used == 0) {
  3427. throw std::runtime_error("n_expert_used must be > 0");
  3428. }
  3429. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  3430. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
  3431. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  3432. layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  3433. layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0);
  3434. layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  3435. }
  3436. } break;
  3437. case LLM_ARCH_DOTS1:
  3438. {
  3439. const int64_t n_ff_exp = hparams.n_ff_exp;
  3440. const int64_t n_expert_shared = hparams.n_expert_shared;
  3441. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3442. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3443. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3444. for (int i = 0; i < n_layer; ++i) {
  3445. auto & layer = layers[i];
  3446. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3447. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  3448. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  3449. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  3450. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  3451. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
  3452. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
  3453. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
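// the first n_layer_dense_lead layers use a dense FFN; the remaining layers use the MoE branch plus shared experts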
  3454. if (i < (int) hparams.n_layer_dense_lead) {
  3455. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  3456. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3457. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3458. } else {
  3459. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  3460. layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
  3461. if (n_expert == 0) {
  3462. throw std::runtime_error("n_expert must be > 0");
  3463. }
  3464. if (n_expert_used == 0) {
  3465. throw std::runtime_error("n_expert_used must be > 0");
  3466. }
  3467. // MoE branch
  3468. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  3469. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
  3470. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  3471. // Shared expert branch
  3472. layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  3473. layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0);
  3474. layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  3475. }
  3476. }
  3477. } break;
  3478. default:
  3479. throw std::runtime_error("unknown architecture");
  3480. }
  3481. if (n_moved_tensors > 0) {
  3482. LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %d others) cannot be used with preferred buffer type %s, using %s instead\n",
  3483. __func__, first_moved_tensor->name, ggml_type_name(first_moved_tensor->type), n_moved_tensors - 1,
  3484. ggml_backend_buft_name(first_moved_from_buft), ggml_backend_buft_name(first_moved_to_buft));
  3485. }
  3486. }
  3487. ml.done_getting_tensors();
  3488. ml.init_mappings(true, use_mlock ? &pimpl->mlock_mmaps : nullptr);
  3489. pimpl->mappings.reserve(ml.mappings.size());
  3490. // create the backend buffers
  3491. std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_bufs;
  3492. ctx_bufs.reserve(ctx_map.size());
// reserve capacity for the maximum number of backend buffers we may create (at most one per context per file)
  3494. const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
  3495. pimpl->bufs.reserve(n_max_backend_buffer);
  3496. for (auto & it : ctx_map) {
  3497. ggml_backend_buffer_type_t buft = it.first;
  3498. ggml_context * ctx = it.second;
  3499. // skip contexts without tensors
  3500. if (ggml_get_first_tensor(ctx) == nullptr) {
  3501. continue;
  3502. }
  3503. llama_buf_map buf_map;
  3504. buf_map.reserve(n_max_backend_buffer);
  3505. // check if it is possible to use buffer_from_host_ptr with this buffer type
  3506. ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
  3507. if (!dev) {
  3508. // FIXME: workaround for CPU backend buft having a NULL device
  3509. dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
  3510. if (!dev) {
  3511. throw std::runtime_error(format("%s: no CPU backend found", __func__));
  3512. }
  3513. }
  3514. ggml_backend_dev_props props;
  3515. ggml_backend_dev_get_props(dev, &props);
  3516. bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
  3517. bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
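// two allocation paths: if the weights are mmapped and the device can wrap host memory with its default buffer type,
// expose the mapped file region directly as a backend buffer; otherwise allocate a backend buffer
// (optionally mlocked when it is host memory) and load the data into it later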
  3518. if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
  3519. for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
  3520. // only the mmap region containing the tensors in the model is mapped to the backend buffer
  3521. // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
  3522. // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
  3523. void * addr = nullptr;
  3524. size_t first, last; // NOLINT
  3525. ml.get_mapping_range(&first, &last, &addr, idx, ctx);
  3526. if (first >= last) {
  3527. continue;
  3528. }
  3529. const size_t max_size = ggml_get_max_tensor_size(ctx);
  3530. ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
  3531. if (buf == nullptr) {
  3532. throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
  3533. }
  3534. pimpl->bufs.emplace_back(buf);
  3535. buf_map.emplace(idx, buf);
  3536. }
  3537. }
  3538. else {
  3539. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
  3540. if (buf == nullptr) {
  3541. throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
  3542. }
  3543. pimpl->bufs.emplace_back(buf);
  3544. if (use_mlock && ggml_backend_buffer_is_host(buf)) {
  3545. pimpl->mlock_bufs.emplace_back(new llama_mlock);
  3546. auto & mlock_buf = pimpl->mlock_bufs.back();
  3547. mlock_buf->init (ggml_backend_buffer_get_base(buf));
  3548. mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
  3549. }
  3550. for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
  3551. buf_map.emplace(idx, buf);
  3552. }
  3553. }
  3554. if (pimpl->bufs.empty()) {
  3555. throw std::runtime_error("failed to allocate buffer");
  3556. }
  3557. for (auto & buf : buf_map) {
  3558. // indicate that this buffer contains weights
  3559. // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
  3560. ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
  3561. }
  3562. ctx_bufs.emplace_back(ctx, buf_map);
  3563. }
  3564. if (llama_supports_gpu_offload()) {
  3565. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  3566. LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  3567. if (n_gpu_layers > (int) hparams.n_layer) {
  3568. LLAMA_LOG_INFO("%s: offloading output layer to GPU\n", __func__);
  3569. }
  3570. const int max_backend_supported_layers = hparams.n_layer + 1;
  3571. const int max_offloadable_layers = hparams.n_layer + 1;
  3572. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
  3573. }
  3574. // print memory requirements per buffer type
  3575. for (auto & buf : pimpl->bufs) {
  3576. LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
  3577. }
  3578. // populate tensors_by_name
  3579. for (auto & ctx : pimpl->ctxs) {
  3580. for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
  3581. tensors_by_name.emplace_back(ggml_get_name(cur), cur);
  3582. }
  3583. }
  3584. // load tensor data
  3585. for (auto & it : ctx_bufs) {
  3586. ggml_context * ctx = it.first;
  3587. auto & bufs = it.second;
  3588. if (!ml.load_all_data(ctx, bufs, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
  3589. return false;
  3590. }
  3591. }
  3592. if (use_mmap_buffer) {
  3593. for (auto & mapping : ml.mappings) {
  3594. pimpl->mappings.emplace_back(std::move(mapping));
  3595. }
  3596. }
  3597. return true;
  3598. }
  3599. std::string llama_model::arch_name() const {
  3600. return llm_arch_name(arch);
  3601. }
  3602. std::string llama_model::type_name() const {
  3603. return llm_type_name(type);
  3604. }
  3605. std::string llama_model::desc() const {
  3606. return pimpl->desc_str;
  3607. }
  3608. size_t llama_model::size() const {
  3609. return pimpl->n_bytes;
  3610. }
  3611. size_t llama_model::n_tensors() const {
  3612. return tensors_by_name.size();
  3613. }
  3614. size_t llama_model::n_devices() const {
  3615. return devices.size();
  3616. }
  3617. uint64_t llama_model::n_elements() const {
  3618. return pimpl->n_elements;
  3619. }
  3620. void llama_model::print_info() const {
  3621. const std::string rope_scaling_type = llama_rope_scaling_type_name(hparams.rope_scaling_type_train);
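// print_f formats a per-layer hyperparameter: a single value when it is constant across layers, a bracketed list otherwise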
  3622. auto print_f = [](const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
  3623. bool is_var = false;
  3624. std::vector<uint32_t> v;
  3625. for (uint32_t i = 0; i < n; ++i) {
  3626. v.push_back(f(i));
  3627. if (v[i] != v[0]) {
  3628. is_var = true;
  3629. }
  3630. }
  3631. std::stringstream ss;
  3632. if (is_var) {
  3633. ss << "[";
  3634. for (uint32_t i = 0; i < n; ++i) {
  3635. ss << v[i];
  3636. if (i < n - 1) {
  3637. ss << ", ";
  3638. }
  3639. }
  3640. ss << "]";
  3641. } else {
  3642. ss << v[0];
  3643. }
  3644. return ss.str();
  3645. };
  3646. // hparams
  3647. LLAMA_LOG_INFO("%s: arch = %s\n", __func__, arch_name().c_str());
  3648. LLAMA_LOG_INFO("%s: vocab_only = %d\n", __func__, hparams.vocab_only);
  3649. if (!hparams.vocab_only) {
  3650. LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
  3651. LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
  3652. LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
  3653. LLAMA_LOG_INFO("%s: n_head = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head(il); }, hparams.n_layer).c_str());
  3654. LLAMA_LOG_INFO("%s: n_head_kv = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
  3655. LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot);
  3656. LLAMA_LOG_INFO("%s: n_swa = %u\n", __func__, hparams.n_swa);
  3657. LLAMA_LOG_INFO("%s: is_swa_any = %u\n", __func__, hparams.is_swa_any());
  3658. LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k);
  3659. LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v);
  3660. LLAMA_LOG_INFO("%s: n_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il); }, hparams.n_layer).c_str());
  3661. LLAMA_LOG_INFO("%s: n_embd_k_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
  3662. LLAMA_LOG_INFO("%s: n_embd_v_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
  3663. LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
  3664. LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
  3665. LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
  3666. LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
  3667. LLAMA_LOG_INFO("%s: f_logit_scale = %.1e\n", __func__, hparams.f_logit_scale);
  3668. LLAMA_LOG_INFO("%s: f_attn_scale = %.1e\n", __func__, hparams.f_attention_scale);
  3669. LLAMA_LOG_INFO("%s: n_ff = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
  3670. LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
  3671. LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
  3672. LLAMA_LOG_INFO("%s: causal attn = %d\n", __func__, hparams.causal_attn);
  3673. LLAMA_LOG_INFO("%s: pooling type = %d\n", __func__, hparams.pooling_type);
  3674. LLAMA_LOG_INFO("%s: rope type = %d\n", __func__, hparams.rope_type);
  3675. LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type.c_str());
  3676. LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
  3677. LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
  3678. LLAMA_LOG_INFO("%s: n_ctx_orig_yarn = %u\n", __func__, hparams.n_ctx_orig_yarn);
  3679. LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
  3680. LLAMA_LOG_INFO("%s: ssm_d_conv = %u\n", __func__, hparams.ssm_d_conv);
  3681. LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
  3682. LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state);
  3683. LLAMA_LOG_INFO("%s: ssm_dt_rank = %u\n", __func__, hparams.ssm_dt_rank);
  3684. LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms = %d\n", __func__, hparams.ssm_dt_b_c_rms);
  3685. if (!classifier_labels.empty()) {
  3686. LLAMA_LOG_INFO("%s: n_cls_out = %u\n", __func__, hparams.n_cls_out);
  3687. size_t i = 0;
  3688. for (auto label : classifier_labels) {
  3689. LLAMA_LOG_INFO("%s: cls_label[%2zu] = %s\n", __func__, i++, label.c_str());
  3690. }
  3691. }
  3692. }
  3693. LLAMA_LOG_INFO("%s: model type = %s\n", __func__, type_name().c_str());
  3694. if (pimpl->n_elements >= 1e12) {
  3695. LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, pimpl->n_elements*1e-12);
  3696. } else if (pimpl->n_elements >= 1e9) {
  3697. LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, pimpl->n_elements*1e-9);
  3698. } else if (pimpl->n_elements >= 1e6) {
  3699. LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, pimpl->n_elements*1e-6);
  3700. } else {
  3701. LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, pimpl->n_elements*1e-3);
  3702. }
  3703. // general kv
  3704. LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, name.c_str());
  3705. if (arch == LLM_ARCH_DEEPSEEK) {
  3706. LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
  3707. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3708. LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
  3709. LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
  3710. }
  3711. if (arch == LLM_ARCH_DEEPSEEK2) {
  3712. LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
  3713. LLAMA_LOG_INFO("%s: n_lora_q = %d\n", __func__, hparams.n_lora_q);
  3714. LLAMA_LOG_INFO("%s: n_lora_kv = %d\n", __func__, hparams.n_lora_kv);
  3715. LLAMA_LOG_INFO("%s: n_embd_head_k_mla = %d\n", __func__, hparams.n_embd_head_k_mla);
  3716. LLAMA_LOG_INFO("%s: n_embd_head_v_mla = %d\n", __func__, hparams.n_embd_head_v_mla);
  3717. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3718. LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
  3719. LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
  3720. LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm);
  3721. LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
  3722. LLAMA_LOG_INFO("%s: rope_yarn_log_mul = %.4f\n", __func__, hparams.rope_yarn_log_mul);
  3723. }
  3724. if (arch == LLM_ARCH_QWEN2MOE) {
  3725. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3726. LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp);
  3727. }
  3728. if (arch == LLM_ARCH_QWEN3MOE) {
  3729. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3730. }
  3731. if (arch == LLM_ARCH_MINICPM ||
  3732. arch == LLM_ARCH_GRANITE ||
  3733. arch == LLM_ARCH_GRANITE_MOE) {
  3734. LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
  3735. LLAMA_LOG_INFO("%s: f_residual_scale = %f\n", __func__, hparams.f_residual_scale);
  3736. LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
  3737. LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp);
  3738. }
  3739. if (arch == LLM_ARCH_BAILINGMOE) {
  3740. LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
  3741. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3742. LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
  3743. LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
  3744. LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm);
  3745. }
  3746. vocab.print_info();
  3747. }
  3748. ggml_backend_dev_t llama_model::dev_layer(int il) const {
  3749. return pimpl->dev_layer.at(il).dev;
  3750. }
  3751. ggml_backend_dev_t llama_model::dev_output() const {
  3752. return pimpl->dev_output.dev;
  3753. }
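// build the candidate op on a no-alloc context, point its sources at a zero-sized buffer of the candidate type,
// then ask the device whether it supports the op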
  3754. template<typename F>
  3755. static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) {
  3756. ggml_init_params params = {
  3757. /*.mem_size =*/ ggml_tensor_overhead()*8,
  3758. /*.mem_buffer =*/ NULL,
  3759. /*.no_alloc =*/ true,
  3760. };
  3761. ggml_context_ptr ctx { ggml_init(params) };
  3762. if (!ctx) {
  3763. throw std::runtime_error(format("failed to create ggml context"));
  3764. }
  3765. ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) };
  3766. ggml_tensor * op_tensor = fn(ctx.get());
  3767. for (int i = 0; i < GGML_MAX_SRC; i++) {
  3768. if (op_tensor->src[i] != nullptr) {
  3769. assert(op_tensor->src[i]->buffer == nullptr);
  3770. op_tensor->src[i]->buffer = buf.get();
  3771. }
  3772. }
  3773. bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
  3774. return op_supported;
  3775. }
  3776. template<typename F>
  3777. static ggml_backend_buffer_type_t select_buft(const buft_list_t & buft_list, const F & fn) {
  3778. for (const auto & cur : buft_list) {
  3779. ggml_backend_dev_t cur_dev = cur.first;
  3780. ggml_backend_buffer_type_t cur_buft = cur.second;
  3781. if (buft_supported(cur_buft, cur_dev, fn)) {
  3782. return cur_buft;
  3783. }
  3784. }
  3785. throw std::runtime_error(format("no suitable buffer type found"));
  3786. }
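// pick the buffer type for layer il using a representative op (an F32 add of two n_embd vectors) as the support probe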
  3787. ggml_backend_buffer_type_t llama_model::select_buft(int il) const {
  3788. return ::select_buft(
  3789. *pimpl->dev_layer.at(il).buft_list,
  3790. [&](ggml_context * ctx) {
  3791. ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
  3792. ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
  3793. return ggml_add(ctx, cur, layer_dir);
  3794. });
  3795. }
  3796. bool llama_model::has_tensor_overrides() const {
  3797. return pimpl->has_tensor_overrides;
  3798. }
  3799. const ggml_tensor * llama_model::get_tensor(const char * name) const {
  3800. auto it = std::find_if(tensors_by_name.begin(), tensors_by_name.end(),
  3801. [name](const std::pair<std::string, ggml_tensor *> & it) {
  3802. return it.first == name;
  3803. });
  3804. if (it == tensors_by_name.end()) {
  3805. return nullptr;
  3806. }
  3807. return it->second;
  3808. }
  3809. float llama_model::get_rope_freq_base (const llama_cparams & cparams, int il) const {
  3810. return hparams.is_swa(il) ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base;
  3811. }
  3812. float llama_model::get_rope_freq_scale(const llama_cparams & cparams, int il) const {
  3813. return hparams.is_swa(il) ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale;
  3814. }
  3815. ggml_tensor * llama_model::get_rope_factors(const llama_cparams & cparams, int il) const {
  3816. const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max;
  3817. // choose long/short freq factors based on the context size
  3818. if (layers[il].rope_freqs != nullptr) {
  3819. return layers[il].rope_freqs;
  3820. }
  3821. if (n_ctx_per_seq > hparams.n_ctx_orig_yarn) {
  3822. return layers[il].rope_long;
  3823. }
  3824. return layers[il].rope_short;
  3825. }
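// graph builder for the LLaMA family: per layer, RMSNorm -> self-attention with RoPE (optional Q/K/V biases, optional rope scaling factors)
// -> residual add -> RMSNorm -> SwiGLU FFN or MoE FFN -> residual add, followed by a final RMSNorm and the LM head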
  3826. struct llm_build_llama : public llm_graph_context {
  3827. llm_build_llama(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  3828. const int64_t n_embd_head = hparams.n_embd_head_v;
  3829. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  3830. GGML_ASSERT(n_embd_head == hparams.n_rot);
  3831. ggml_tensor * cur;
  3832. ggml_tensor * inpL;
  3833. inpL = build_inp_embd(model.tok_embd);
  3834. // inp_pos - contains the positions
  3835. ggml_tensor * inp_pos = build_inp_pos();
  3836. auto * inp_attn = build_attn_inp_kv_unified();
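// the attention scale defaults to 1/sqrt(head_dim) unless the model provides a non-zero f_attention_scale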
  3837. const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
  3838. for (int il = 0; il < n_layer; ++il) {
  3839. ggml_tensor * inpSA = inpL;
  3840. // norm
  3841. cur = build_norm(inpL,
  3842. model.layers[il].attn_norm, NULL,
  3843. LLM_NORM_RMS, il);
  3844. cb(cur, "attn_norm", il);
  3845. // self-attention
  3846. {
  3847. // rope freq factors for llama3; may return nullptr for llama2 and other models
  3848. ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
  3849. // compute Q and K and RoPE them
  3850. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  3851. cb(Qcur, "Qcur", il);
  3852. if (model.layers[il].bq) {
  3853. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  3854. cb(Qcur, "Qcur", il);
  3855. }
  3856. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  3857. cb(Kcur, "Kcur", il);
  3858. if (model.layers[il].bk) {
  3859. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  3860. cb(Kcur, "Kcur", il);
  3861. }
  3862. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  3863. cb(Vcur, "Vcur", il);
  3864. if (model.layers[il].bv) {
  3865. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  3866. cb(Vcur, "Vcur", il);
  3867. }
  3868. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3869. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  3870. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  3871. Qcur = ggml_rope_ext(
  3872. ctx0, Qcur, inp_pos, rope_factors,
  3873. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  3874. ext_factor, attn_factor, beta_fast, beta_slow
  3875. );
  3876. Kcur = ggml_rope_ext(
  3877. ctx0, Kcur, inp_pos, rope_factors,
  3878. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  3879. ext_factor, attn_factor, beta_fast, beta_slow
  3880. );
  3881. cb(Qcur, "Qcur", il);
  3882. cb(Kcur, "Kcur", il);
  3883. cb(Vcur, "Vcur", il);
  3884. cur = build_attn(inp_attn, gf,
  3885. model.layers[il].wo, model.layers[il].bo,
  3886. Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
  3887. cb(cur, "attn_out", il);
  3888. }
  3889. if (il == n_layer - 1) {
  3890. // skip computing output for unused tokens
  3891. ggml_tensor * inp_out_ids = build_inp_out_ids();
  3892. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  3893. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  3894. }
  3895. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3896. cb(ffn_inp, "ffn_inp", il);
  3897. // feed-forward network (non-MoE)
  3898. if (model.layers[il].ffn_gate_inp == nullptr) {
  3899. cur = build_norm(ffn_inp,
  3900. model.layers[il].ffn_norm, NULL,
  3901. LLM_NORM_RMS, il);
  3902. cb(cur, "ffn_norm", il);
  3903. cur = build_ffn(cur,
  3904. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  3905. model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
  3906. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  3907. NULL,
  3908. LLM_FFN_SILU, LLM_FFN_PAR, il);
  3909. cb(cur, "ffn_out", il);
  3910. } else {
  3911. // MoE branch
  3912. cur = build_norm(ffn_inp,
  3913. model.layers[il].ffn_norm, NULL,
  3914. LLM_NORM_RMS, il);
  3915. cb(cur, "ffn_norm", il);
  3916. cur = build_moe_ffn(cur,
  3917. model.layers[il].ffn_gate_inp,
  3918. model.layers[il].ffn_up_exps,
  3919. model.layers[il].ffn_gate_exps,
  3920. model.layers[il].ffn_down_exps,
  3921. nullptr,
  3922. n_expert, n_expert_used,
  3923. LLM_FFN_SILU, true,
  3924. false, 0.0,
  3925. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  3926. il);
  3927. cb(cur, "ffn_moe_out", il);
  3928. }
  3929. cur = ggml_add(ctx0, cur, ffn_inp);
  3930. cb(cur, "ffn_out", il);
  3931. cur = build_cvec(cur, il);
  3932. cb(cur, "l_out", il);
  3933. // input for next layer
  3934. inpL = cur;
  3935. }
  3936. cur = inpL;
  3937. cur = build_norm(cur,
  3938. model.output_norm, NULL,
  3939. LLM_NORM_RMS, -1);
  3940. cb(cur, "result_norm", -1);
  3941. res->t_embd = cur;
  3942. // lm_head
  3943. cur = build_lora_mm(model.output, cur);
  3944. cb(cur, "result_output", -1);
  3945. res->t_logits = cur;
  3946. ggml_build_forward_expand(gf, cur);
  3947. }
  3948. };
  3949. struct llm_build_llama_iswa : public llm_graph_context {
  3950. llm_build_llama_iswa(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  3951. const int64_t n_embd_head = hparams.n_embd_head_v;
  3952. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  3953. GGML_ASSERT(n_embd_head == hparams.n_rot);
  3954. ggml_tensor * cur;
  3955. ggml_tensor * inpL;
  3956. inpL = build_inp_embd(model.tok_embd);
  3957. // inp_pos - contains the positions
  3958. ggml_tensor * inp_pos = build_inp_pos();
  3959. // temperature tuning
  3960. ggml_tensor * inp_attn_scale = nullptr;
  3961. inp_attn_scale = build_inp_attn_scale();
  3962. auto * inp_attn = build_attn_inp_kv_unified_iswa();
  3963. const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
  3964. for (int il = 0; il < n_layer; ++il) {
  3965. ggml_tensor * inpSA = inpL;
  3966. const bool use_rope = (il + 1) % hparams.n_no_rope_layer_step != 0;
  3967. // norm
  3968. cur = build_norm(inpL,
  3969. model.layers[il].attn_norm, NULL,
  3970. LLM_NORM_RMS, il);
  3971. cb(cur, "attn_norm", il);
  3972. // self-attention
  3973. {
  3974. // rope freq factors for llama3; may return nullptr for llama2 and other models
  3975. ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
  3976. // compute Q and K and RoPE them
  3977. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  3978. cb(Qcur, "Qcur", il);
  3979. if (model.layers[il].bq) {
  3980. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  3981. cb(Qcur, "Qcur", il);
  3982. }
  3983. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  3984. cb(Kcur, "Kcur", il);
  3985. if (model.layers[il].bk) {
  3986. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  3987. cb(Kcur, "Kcur", il);
  3988. }
  3989. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  3990. cb(Vcur, "Vcur", il);
  3991. if (model.layers[il].bv) {
  3992. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  3993. cb(Vcur, "Vcur", il);
  3994. }
  3995. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3996. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  3997. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  3998. if (use_rope) {
  3999. Qcur = ggml_rope_ext(
  4000. ctx0, Qcur, inp_pos, rope_factors,
  4001. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4002. ext_factor, attn_factor, beta_fast, beta_slow
  4003. );
  4004. Kcur = ggml_rope_ext(
  4005. ctx0, Kcur, inp_pos, rope_factors,
  4006. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4007. ext_factor, attn_factor, beta_fast, beta_slow
  4008. );
  4009. } else if (inp_attn_scale) {
  4010. Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
  4011. }
  4012. cb(Qcur, "Qcur", il);
  4013. cb(Kcur, "Kcur", il);
  4014. cb(Vcur, "Vcur", il);
  4015. if (use_rope && hparams.use_kq_norm) {
  4016. // Llama4TextL2Norm
  4017. Qcur = ggml_rms_norm(ctx0, Qcur, hparams.f_norm_rms_eps);
  4018. Kcur = ggml_rms_norm(ctx0, Kcur, hparams.f_norm_rms_eps);
  4019. cb(Qcur, "Qcur_normed", il);
  4020. cb(Kcur, "Kcur_normed", il);
  4021. }
  4022. cur = build_attn(inp_attn, gf,
  4023. model.layers[il].wo, model.layers[il].bo,
  4024. Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
  4025. cb(cur, "attn_out", il);
  4026. }
  4027. if (il == n_layer - 1) {
  4028. // skip computing output for unused tokens
  4029. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4030. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4031. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4032. }
  4033. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4034. cb(ffn_inp, "ffn_inp", il);
  4035. // feed-forward network (non-MoE)
  4036. if (model.layers[il].ffn_gate_inp == nullptr) {
  4037. cur = build_norm(ffn_inp,
  4038. model.layers[il].ffn_norm, NULL,
  4039. LLM_NORM_RMS, il);
  4040. cb(cur, "ffn_norm", il);
  4041. cur = build_ffn(cur,
  4042. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  4043. model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
  4044. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4045. NULL,
  4046. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4047. cb(cur, "ffn_out", il);
  4048. } else {
  4049. ggml_tensor * ffn_inp_normed = build_norm(ffn_inp,
  4050. model.layers[il].ffn_norm, NULL,
  4051. LLM_NORM_RMS, il);
  4052. cb(cur, "ffn_norm", il);
  4053. ggml_tensor * moe_out = build_moe_ffn(ffn_inp_normed,
  4054. model.layers[il].ffn_gate_inp,
  4055. model.layers[il].ffn_up_exps,
  4056. model.layers[il].ffn_gate_exps,
  4057. model.layers[il].ffn_down_exps,
  4058. nullptr,
  4059. n_expert, n_expert_used,
  4060. LLM_FFN_SILU, false,
  4061. false, 0.0,
  4062. LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID,
  4063. il);
  4064. // Shared experts
  4065. ggml_tensor * shexp_out = build_ffn(ffn_inp_normed,
  4066. model.layers[il].ffn_up_shexp, NULL, NULL,
  4067. model.layers[il].ffn_gate_shexp, NULL, NULL,
  4068. model.layers[il].ffn_down_shexp, NULL, NULL,
  4069. NULL,
  4070. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4071. cb(shexp_out, "ffn_moe_shexp", il);
  4072. cur = ggml_add(ctx0, moe_out, shexp_out);
  4073. cb(cur, "ffn_moe_out_merged", il);
  4074. }
  4075. cur = ggml_add(ctx0, cur, ffn_inp);
  4076. cb(cur, "ffn_out", il);
  4077. cur = build_cvec(cur, il);
  4078. cb(cur, "l_out", il);
  4079. // input for next layer
  4080. inpL = cur;
  4081. }
  4082. cur = inpL;
  4083. cur = build_norm(cur,
  4084. model.output_norm, NULL,
  4085. LLM_NORM_RMS, -1);
  4086. cb(cur, "result_norm", -1);
  4087. res->t_embd = cur;
  4088. // lm_head
  4089. cur = build_lora_mm(model.output, cur);
  4090. cb(cur, "result_output", -1);
  4091. res->t_logits = cur;
  4092. ggml_build_forward_expand(gf, cur);
  4093. }
  4094. };
  4095. struct llm_build_deci : public llm_graph_context {
  4096. llm_build_deci(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4097. const int64_t n_embd_head = hparams.n_embd_head_v;
  4098. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4099. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4100. ggml_tensor * cur;
  4101. ggml_tensor * inpL;
  4102. inpL = build_inp_embd(model.tok_embd);
  4103. // inp_pos - contains the positions
  4104. ggml_tensor * inp_pos = build_inp_pos();
  4105. auto * inp_attn = build_attn_inp_kv_unified();
  4106. const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
  4107. for (int il = 0; il < n_layer; ++il) {
  4108. ggml_tensor * inpSA = inpL;
  4109. const int64_t n_head_kv = hparams.n_head_kv(il);
  4110. const int64_t n_head = hparams.n_head(il);
  4111. const int64_t n_ff = hparams.n_ff(il);
  4112. if (n_head == 0) {
  4113. // attention-free layer of Llama-3_1-Nemotron-51B
  4114. cur = inpL;
  4115. } else {
  4116. // norm
  4117. cur = build_norm(inpL,
  4118. model.layers[il].attn_norm, NULL,
  4119. LLM_NORM_RMS, il);
  4120. cb(cur, "attn_norm", il);
  4121. }
  4122. if (n_head > 0 && n_head_kv == 0) {
  4123. // "linear attention" of Llama-3_1-Nemotron-51B
  4124. cur = build_lora_mm(model.layers[il].wo, cur);
  4125. cb(cur, "wo", il);
  4126. } else if (n_head > 0) {
  4127. // self-attention
  4128. // rope freq factors for llama3; may return nullptr for llama2 and other models
  4129. ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
  4130. // compute Q and K and RoPE them
  4131. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4132. cb(Qcur, "Qcur", il);
  4133. if (model.layers[il].bq) {
  4134. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  4135. cb(Qcur, "Qcur", il);
  4136. }
  4137. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4138. cb(Kcur, "Kcur", il);
  4139. if (model.layers[il].bk) {
  4140. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  4141. cb(Kcur, "Kcur", il);
  4142. }
  4143. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4144. cb(Vcur, "Vcur", il);
  4145. if (model.layers[il].bv) {
  4146. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  4147. cb(Vcur, "Vcur", il);
  4148. }
  4149. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4150. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4151. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4152. Qcur = ggml_rope_ext(
  4153. ctx0, Qcur, inp_pos, rope_factors,
  4154. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4155. ext_factor, attn_factor, beta_fast, beta_slow
  4156. );
  4157. Kcur = ggml_rope_ext(
  4158. ctx0, Kcur, inp_pos, rope_factors,
  4159. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4160. ext_factor, attn_factor, beta_fast, beta_slow
  4161. );
  4162. cb(Qcur, "Qcur", il);
  4163. cb(Kcur, "Kcur", il);
  4164. cb(Vcur, "Vcur", il);
  4165. cur = build_attn(inp_attn, gf,
  4166. model.layers[il].wo, model.layers[il].bo,
  4167. Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
  4168. }
  4169. if (il == n_layer - 1) {
  4170. // skip computing output for unused tokens
  4171. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4172. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4173. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4174. }
  4175. // FFN-free layer of Llama-3_1-Nemotron-Ultra-253B
  4176. if (n_ff == 0) {
  4177. continue;
  4178. }
  4179. // modified to support attention-free layer of Llama-3_1-Nemotron-51B
  4180. ggml_tensor * ffn_inp = cur;
  4181. if (n_head > 0) {
  4182. ffn_inp = ggml_add(ctx0, cur, inpSA);
  4183. cb(ffn_inp, "ffn_inp", il);
  4184. }
  4185. // feed-forward network
  4186. if (model.layers[il].ffn_gate_inp == nullptr) {
  4187. cur = build_norm(ffn_inp,
  4188. model.layers[il].ffn_norm, NULL,
  4189. LLM_NORM_RMS, il);
  4190. cb(cur, "ffn_norm", il);
  4191. cur = build_ffn(cur,
  4192. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  4193. model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
  4194. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4195. NULL,
  4196. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4197. cb(cur, "ffn_out", il);
  4198. }
  4199. cur = ggml_add(ctx0, cur, ffn_inp);
  4200. cb(cur, "ffn_out", il);
  4201. cur = build_cvec(cur, il);
  4202. cb(cur, "l_out", il);
  4203. // input for next layer
  4204. inpL = cur;
  4205. }
  4206. cur = inpL;
  4207. cur = build_norm(cur,
  4208. model.output_norm, NULL,
  4209. LLM_NORM_RMS, -1);
  4210. cb(cur, "result_norm", -1);
  4211. res->t_embd = cur;
  4212. // lm_head
  4213. cur = build_lora_mm(model.output, cur);
  4214. cb(cur, "result_output", -1);
  4215. res->t_logits = cur;
  4216. ggml_build_forward_expand(gf, cur);
  4217. }
  4218. };
  4219. struct llm_build_baichuan : public llm_graph_context {
  4220. llm_build_baichuan(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4221. const int64_t n_embd_head = hparams.n_embd_head_v;
  4222. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4223. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4224. ggml_tensor * cur;
  4225. ggml_tensor * inpL;
  4226. inpL = build_inp_embd(model.tok_embd);
  4227. // inp_pos - contains the positions
  4228. ggml_tensor * inp_pos = model.type == LLM_TYPE_7B ? build_inp_pos() : nullptr;
  4229. auto * inp_attn = build_attn_inp_kv_unified();
  4230. for (int il = 0; il < n_layer; ++il) {
  4231. ggml_tensor * inpSA = inpL;
  4232. cur = build_norm(inpL,
  4233. model.layers[il].attn_norm, NULL,
  4234. LLM_NORM_RMS, il);
  4235. cb(cur, "attn_norm", il);
  4236. // self-attention
  4237. {
  4238. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4239. cb(Qcur, "Qcur", il);
  4240. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4241. cb(Kcur, "Kcur", il);
  4242. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4243. cb(Vcur, "Vcur", il);
  4244. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4245. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4246. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4247. switch (model.type) {
  4248. case LLM_TYPE_7B:
  4249. Qcur = ggml_rope_ext(
  4250. ctx0, Qcur, inp_pos, nullptr,
  4251. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4252. ext_factor, attn_factor, beta_fast, beta_slow
  4253. );
  4254. Kcur = ggml_rope_ext(
  4255. ctx0, Kcur, inp_pos, nullptr,
  4256. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4257. ext_factor, attn_factor, beta_fast, beta_slow
  4258. );
  4259. break;
  4260. case LLM_TYPE_13B:
  4261. break;
  4262. default:
  4263. GGML_ABORT("fatal error");
  4264. }
  4265. cb(Qcur, "Qcur", il);
  4266. cb(Kcur, "Kcur", il);
  4267. cb(Vcur, "Vcur", il);
  4268. cur = build_attn(inp_attn, gf,
  4269. model.layers[il].wo, NULL,
  4270. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4271. }
  4272. if (il == n_layer - 1) {
  4273. // skip computing output for unused tokens
  4274. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4275. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4276. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4277. }
  4278. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4279. cb(ffn_inp, "ffn_inp", il);
  4280. // feed-forward network
  4281. {
  4282. cur = build_norm(ffn_inp,
  4283. model.layers[il].ffn_norm, NULL,
  4284. LLM_NORM_RMS, il);
  4285. cb(cur, "ffn_norm", il);
  4286. cur = build_ffn(cur,
  4287. model.layers[il].ffn_up, NULL, NULL,
  4288. model.layers[il].ffn_gate, NULL, NULL,
  4289. model.layers[il].ffn_down, NULL, NULL,
  4290. NULL,
  4291. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4292. cb(cur, "ffn_out", il);
  4293. }
  4294. cur = ggml_add(ctx0, cur, ffn_inp);
  4295. cur = build_cvec(cur, il);
  4296. cb(cur, "l_out", il);
  4297. // input for next layer
  4298. inpL = cur;
  4299. }
  4300. cur = inpL;
  4301. cur = build_norm(cur,
  4302. model.output_norm, NULL,
  4303. LLM_NORM_RMS, -1);
  4304. cb(cur, "result_norm", -1);
  4305. res->t_embd = cur;
  4306. // lm_head
  4307. cur = build_lora_mm(model.output, cur);
  4308. cb(cur, "result_output", -1);
  4309. res->t_logits = cur;
  4310. ggml_build_forward_expand(gf, cur);
  4311. }
  4312. };
  4313. struct llm_build_xverse : public llm_graph_context {
  4314. llm_build_xverse(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4315. const int64_t n_embd_head = hparams.n_embd_head_v;
  4316. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4317. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4318. ggml_tensor * cur;
  4319. ggml_tensor * inpL;
  4320. inpL = build_inp_embd(model.tok_embd);
  4321. // inp_pos - contains the positions
  4322. ggml_tensor * inp_pos = build_inp_pos();
  4323. auto * inp_attn = build_attn_inp_kv_unified();
  4324. for (int il = 0; il < n_layer; ++il) {
  4325. ggml_tensor * inpSA = inpL;
  4326. cur = build_norm(inpL,
  4327. model.layers[il].attn_norm, NULL,
  4328. LLM_NORM_RMS, il);
  4329. cb(cur, "attn_norm", il);
  4330. // self-attention
  4331. {
  4332. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4333. cb(Qcur, "Qcur", il);
  4334. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4335. cb(Kcur, "Kcur", il);
  4336. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4337. cb(Vcur, "Vcur", il);
  4338. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4339. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4340. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4341. Qcur = ggml_rope_ext(
  4342. ctx0, Qcur, inp_pos, nullptr,
  4343. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4344. ext_factor, attn_factor, beta_fast, beta_slow
  4345. );
  4346. Kcur = ggml_rope_ext(
  4347. ctx0, Kcur, inp_pos, nullptr,
  4348. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4349. ext_factor, attn_factor, beta_fast, beta_slow
  4350. );
  4351. cb(Qcur, "Qcur", il);
  4352. cb(Kcur, "Kcur", il);
  4353. cb(Vcur, "Vcur", il);
  4354. cur = build_attn(inp_attn, gf,
  4355. model.layers[il].wo, NULL,
  4356. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4357. }
  4358. if (il == n_layer - 1) {
  4359. // skip computing output for unused tokens
  4360. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4361. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4362. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4363. }
  4364. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4365. cb(ffn_inp, "ffn_inp", il);
  4366. // feed-forward network
  4367. {
  4368. cur = build_norm(ffn_inp,
  4369. model.layers[il].ffn_norm, NULL,
  4370. LLM_NORM_RMS, il);
  4371. cb(cur, "ffn_norm", il);
  4372. cur = build_ffn(cur,
  4373. model.layers[il].ffn_up, NULL, NULL,
  4374. model.layers[il].ffn_gate, NULL, NULL,
  4375. model.layers[il].ffn_down, NULL, NULL,
  4376. NULL,
  4377. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4378. cb(cur, "ffn_out", il);
  4379. }
  4380. cur = ggml_add(ctx0, cur, ffn_inp);
  4381. cur = build_cvec(cur, il);
  4382. cb(cur, "l_out", il);
  4383. // input for next layer
  4384. inpL = cur;
  4385. }
  4386. cur = inpL;
  4387. cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);
  4388. cb(cur, "result_norm", -1);
  4389. res->t_embd = cur;
  4390. // lm_head
  4391. cur = build_lora_mm(model.output, cur);
  4392. cb(cur, "result_output", -1);
  4393. res->t_logits = cur;
  4394. ggml_build_forward_expand(gf, cur);
  4395. }
  4396. };
  4397. struct llm_build_falcon : public llm_graph_context {
  4398. llm_build_falcon(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4399. const int64_t n_embd_head = hparams.n_embd_head_v;
  4400. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4401. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4402. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4403. ggml_tensor * cur;
  4404. ggml_tensor * inpL;
  4405. inpL = build_inp_embd(model.tok_embd);
  4406. // inp_pos - contains the positions
  4407. ggml_tensor * inp_pos = build_inp_pos();
  4408. auto * inp_attn = build_attn_inp_kv_unified();
  4409. for (int il = 0; il < n_layer; ++il) {
  4410. ggml_tensor * attn_norm;
  4411. attn_norm = build_norm(inpL,
  4412. model.layers[il].attn_norm,
  4413. model.layers[il].attn_norm_b,
  4414. LLM_NORM, il);
  4415. cb(attn_norm, "attn_norm", il);
  4416. // self-attention
  4417. {
  4418. if (model.layers[il].attn_norm_2) {
  4419. // Falcon-40B
  4420. cur = build_norm(inpL,
  4421. model.layers[il].attn_norm_2,
  4422. model.layers[il].attn_norm_2_b,
  4423. LLM_NORM, il);
  4424. cb(cur, "attn_norm_2", il);
  4425. } else {
  4426. cur = attn_norm;
  4427. }
  4428. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4429. cb(cur, "wqkv", il);
  4430. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4431. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4432. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4433. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4434. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4435. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4436. // using mode = 2 for neox mode
  4437. Qcur = ggml_rope_ext(
  4438. ctx0, Qcur, inp_pos, nullptr,
  4439. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4440. ext_factor, attn_factor, beta_fast, beta_slow
  4441. );
  4442. Kcur = ggml_rope_ext(
  4443. ctx0, Kcur, inp_pos, nullptr,
  4444. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4445. ext_factor, attn_factor, beta_fast, beta_slow
  4446. );
  4447. cb(Qcur, "Qcur", il);
  4448. cb(Kcur, "Kcur", il);
  4449. cb(Vcur, "Vcur", il);
  4450. cur = build_attn(inp_attn, gf,
  4451. model.layers[il].wo, NULL,
  4452. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4453. }
  4454. if (il == n_layer - 1) {
  4455. // skip computing output for unused tokens
  4456. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4457. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4458. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  4459. attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids);
  4460. }
  4461. ggml_tensor * ffn_inp = cur;
  4462. // feed forward
  4463. {
  4464. cur = build_ffn(attn_norm, // !! use the attn norm, not the result
  4465. model.layers[il].ffn_up, NULL, NULL,
  4466. NULL, NULL, NULL,
  4467. model.layers[il].ffn_down, NULL, NULL,
  4468. NULL,
  4469. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  4470. cb(cur, "ffn_out", il);
  4471. }
  4472. cur = ggml_add(ctx0, cur, ffn_inp);
  4473. cur = ggml_add(ctx0, cur, inpL);
  4474. cur = build_cvec(cur, il);
  4475. cb(cur, "l_out", il);
  4476. // input for next layer
  4477. inpL = cur;
  4478. }
  4479. cur = inpL;
  4480. // norm
  4481. cur = build_norm(cur,
  4482. model.output_norm,
  4483. model.output_norm_b,
  4484. LLM_NORM, -1);
  4485. cb(cur, "result_norm", -1);
  4486. res->t_embd = cur;
  4487. cur = build_lora_mm(model.output, cur);
  4488. cb(cur, "result_output", -1);
  4489. res->t_logits = cur;
  4490. ggml_build_forward_expand(gf, cur);
  4491. }
  4492. };
  4493. struct llm_build_grok : public llm_graph_context {
  4494. llm_build_grok(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4495. const int64_t n_embd_head = hparams.n_embd_head_v;
  4496. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4497. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4498. ggml_tensor * cur;
  4499. ggml_tensor * inpL;
  4500. inpL = build_inp_embd(model.tok_embd);
  4501. // multiply by embedding_multiplier_scale of 78.38367176906169
  4502. inpL = ggml_scale(ctx0, inpL, 78.38367176906169f);
  4503. // inp_pos - contains the positions
  4504. ggml_tensor * inp_pos = build_inp_pos();
  4505. auto * inp_attn = build_attn_inp_kv_unified();
  4506. for (int il = 0; il < n_layer; ++il) {
  4507. ggml_tensor * inpSA = inpL;
  4508. // norm
  4509. cur = build_norm(inpL,
  4510. model.layers[il].attn_norm, NULL,
  4511. LLM_NORM_RMS, il);
  4512. cb(cur, "attn_norm", il);
  4513. // self-attention
  4514. {
  4515. // compute Q and K and RoPE them
  4516. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4517. cb(Qcur, "Qcur", il);
  4518. if (model.layers[il].bq) {
  4519. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  4520. cb(Qcur, "Qcur", il);
  4521. }
  4522. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4523. cb(Kcur, "Kcur", il);
  4524. if (model.layers[il].bk) {
  4525. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  4526. cb(Kcur, "Kcur", il);
  4527. }
  4528. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4529. cb(Vcur, "Vcur", il);
  4530. if (model.layers[il].bv) {
  4531. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  4532. cb(Vcur, "Vcur", il);
  4533. }
  4534. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4535. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4536. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4537. Qcur = ggml_rope_ext(
  4538. ctx0, Qcur, inp_pos, nullptr,
  4539. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4540. ext_factor, attn_factor, beta_fast, beta_slow
  4541. );
  4542. Kcur = ggml_rope_ext(
  4543. ctx0, Kcur, inp_pos, nullptr,
  4544. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4545. ext_factor, attn_factor, beta_fast, beta_slow
  4546. );
  4547. cb(Qcur, "Qcur", il);
  4548. cb(Kcur, "Kcur", il);
  4549. cb(Vcur, "Vcur", il);
  4550. cur = build_attn(inp_attn, gf,
  4551. model.layers[il].wo, model.layers[il].bo,
  4552. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
  4553. }
  4554. if (il == n_layer - 1) {
  4555. // skip computing output for unused tokens
  4556. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4557. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4558. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4559. }
  4560. // Grok
  4561. // if attn_out_norm is present then apply it before adding the input
  4562. if (model.layers[il].attn_out_norm) {
  4563. cur = build_norm(cur,
  4564. model.layers[il].attn_out_norm, NULL,
  4565. LLM_NORM_RMS, il);
  4566. cb(cur, "attn_out_norm", il);
  4567. }
  4568. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4569. cb(ffn_inp, "ffn_inp", il);
  4570. // feed-forward network
  4571. // MoE branch
  4572. cur = build_norm(ffn_inp,
  4573. model.layers[il].ffn_norm, NULL,
  4574. LLM_NORM_RMS, il);
  4575. cb(cur, "ffn_norm", il);
  4576. cur = build_moe_ffn(cur,
  4577. model.layers[il].ffn_gate_inp,
  4578. model.layers[il].ffn_up_exps,
  4579. model.layers[il].ffn_gate_exps,
  4580. model.layers[il].ffn_down_exps,
  4581. nullptr,
  4582. n_expert, n_expert_used,
  4583. LLM_FFN_GELU, true,
  4584. false, 0.0,
  4585. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  4586. il);
  4587. cb(cur, "ffn_moe_out", il);
  4588. // Grok
  4589. // if layer_out_norm is present then apply it before adding the input
  4590. // Idea: maybe ffn_out_norm is a better name
  4591. if (model.layers[il].layer_out_norm) {
  4592. cur = build_norm(cur,
  4593. model.layers[il].layer_out_norm, NULL,
  4594. LLM_NORM_RMS, il);
  4595. cb(cur, "layer_out_norm", il);
  4596. }
  4597. cur = ggml_add(ctx0, cur, ffn_inp);
  4598. cb(cur, "ffn_out", il);
  4599. cur = build_cvec(cur, il);
  4600. cb(cur, "l_out", il);
  4601. // input for next layer
  4602. inpL = cur;
  4603. }
  4604. cur = inpL;
  4605. cur = build_norm(cur,
  4606. model.output_norm, NULL,
  4607. LLM_NORM_RMS, -1);
  4608. cb(cur, "result_norm", -1);
  4609. res->t_embd = cur;
  4610. // lm_head
  4611. cur = build_lora_mm(model.output, cur);
  4612. // Grok
  4613. // multiply logits by output_multiplier_scale of 0.5773502691896257
  4614. cur = ggml_scale(ctx0, cur, 0.5773502691896257f);
  4615. cb(cur, "result_output", -1);
  4616. res->t_logits = cur;
  4617. ggml_build_forward_expand(gf, cur);
  4618. }
  4619. };
  4620. struct llm_build_dbrx : public llm_graph_context {
  4621. llm_build_dbrx(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4622. const int64_t n_embd_head = hparams.n_embd_head_v;
  4623. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4624. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4625. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4626. ggml_tensor * cur;
  4627. ggml_tensor * inpL;
  4628. inpL = build_inp_embd(model.tok_embd);
  4629. // inp_pos - contains the positions
  4630. ggml_tensor * inp_pos = build_inp_pos();
  4631. auto * inp_attn = build_attn_inp_kv_unified();
  4632. for (int il = 0; il < n_layer; ++il) {
  4633. ggml_tensor * inpSA = inpL;
  4634. // norm
  4635. cur = build_norm(inpL,
  4636. model.layers[il].attn_norm, NULL,
  4637. LLM_NORM, il);
  4638. cb(cur, "attn_norm", il);
  4639. // self-attention
  4640. {
  4641. ggml_tensor * Qcur = nullptr;
  4642. ggml_tensor * Kcur = nullptr;
  4643. ggml_tensor * Vcur = nullptr;
  4644. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4645. cb(cur, "wqkv", il);
  4646. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  4647. cb(cur, "wqkv_clamped", il);
  4648. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4649. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4650. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4651. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4652. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4653. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4654. Qcur = ggml_rope_ext(
  4655. ctx0, Qcur, inp_pos, nullptr,
  4656. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4657. ext_factor, attn_factor, beta_fast, beta_slow
  4658. );
  4659. Kcur = ggml_rope_ext(
  4660. ctx0, Kcur, inp_pos, nullptr,
  4661. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4662. ext_factor, attn_factor, beta_fast, beta_slow
  4663. );
  4664. cb(Qcur, "Qcur", il);
  4665. cb(Kcur, "Kcur", il);
  4666. cb(Vcur, "Vcur", il);
  4667. cur = build_attn(inp_attn, gf,
  4668. model.layers[il].wo, NULL,
  4669. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4670. }
  4671. if (il == n_layer - 1) {
  4672. // skip computing output for unused tokens
  4673. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4674. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4675. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4676. }
  4677. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4678. cb(ffn_inp, "ffn_inp", il);
  4679. // feed-forward network
  4680. // MoE branch
  4681. cur = build_norm(ffn_inp,
  4682. model.layers[il].attn_out_norm, NULL,
  4683. LLM_NORM, il);
  4684. cb(cur, "attn_out_norm", il);
  4685. cur = build_moe_ffn(cur,
  4686. model.layers[il].ffn_gate_inp,
  4687. model.layers[il].ffn_up_exps,
  4688. model.layers[il].ffn_gate_exps,
  4689. model.layers[il].ffn_down_exps,
  4690. nullptr,
  4691. n_expert, n_expert_used,
  4692. LLM_FFN_SILU, true,
  4693. false, 0.0,
  4694. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  4695. il);
  4696. cb(cur, "ffn_moe_out", il);
  4697. cur = ggml_add(ctx0, cur, ffn_inp);
  4698. cb(cur, "ffn_out", il);
  4699. cur = build_cvec(cur, il);
  4700. cb(cur, "l_out", il);
  4701. // input for next layer
  4702. inpL = cur;
  4703. }
  4704. cur = inpL;
  4705. cur = build_norm(cur,
  4706. model.output_norm, NULL,
  4707. LLM_NORM, -1);
  4708. cb(cur, "result_norm", -1);
  4709. res->t_embd = cur;
  4710. // lm_head
  4711. cur = build_lora_mm(model.output, cur);
  4712. cb(cur, "result_output", -1);
  4713. res->t_logits = cur;
  4714. ggml_build_forward_expand(gf, cur);
  4715. }
  4716. };
  4717. struct llm_build_starcoder : public llm_graph_context {
  4718. llm_build_starcoder(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4719. const int64_t n_embd_head = hparams.n_embd_head_v;
  4720. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4721. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4722. ggml_tensor * cur;
  4723. ggml_tensor * inpL;
  4724. inpL = build_inp_embd(model.tok_embd);
  4725. // inp_pos - contains the positions
  4726. ggml_tensor * inp_pos = build_inp_pos();
  4727. auto * inp_attn = build_attn_inp_kv_unified();
  4728. ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  4729. cb(pos, "pos_embd", -1);
  4730. inpL = ggml_add(ctx0, inpL, pos);
  4731. cb(inpL, "inpL", -1);
  4732. for (int il = 0; il < n_layer; ++il) {
  4733. cur = build_norm(inpL,
  4734. model.layers[il].attn_norm,
  4735. model.layers[il].attn_norm_b,
  4736. LLM_NORM, il);
  4737. cb(cur, "attn_norm", il);
  4738. // self-attention
  4739. {
  4740. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4741. cb(cur, "wqkv", il);
  4742. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4743. cb(cur, "bqkv", il);
  4744. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4745. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4746. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4747. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4748. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4749. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4750. cb(Qcur, "Qcur", il);
  4751. cb(Kcur, "Kcur", il);
  4752. cb(Vcur, "Vcur", il);
  4753. cur = build_attn(inp_attn, gf,
  4754. model.layers[il].wo, model.layers[il].bo,
  4755. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4756. }
  4757. if (il == n_layer - 1) {
  4758. // skip computing output for unused tokens
  4759. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4760. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4761. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  4762. }
  4763. // add the input
  4764. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4765. cb(ffn_inp, "ffn_inp", il);
  4766. // FF
  4767. {
  4768. cur = build_norm(ffn_inp,
  4769. model.layers[il].ffn_norm,
  4770. model.layers[il].ffn_norm_b,
  4771. LLM_NORM, il);
  4772. cb(cur, "ffn_norm", il);
  4773. cur = build_ffn(cur,
  4774. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  4775. NULL, NULL, NULL,
  4776. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4777. NULL,
  4778. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  4779. cb(cur, "ffn_out", il);
  4780. }
  4781. cur = ggml_add(ctx0, cur, ffn_inp);
  4782. cur = build_cvec(cur, il);
  4783. cb(cur, "l_out", il);
  4784. // input for next layer
  4785. inpL = cur;
  4786. }
  4787. cur = build_norm(inpL,
  4788. model.output_norm,
  4789. model.output_norm_b,
  4790. LLM_NORM, -1);
  4791. cb(cur, "result_norm", -1);
  4792. res->t_embd = cur;
  4793. cur = build_lora_mm(model.output, cur);
  4794. cb(cur, "result_output", -1);
  4795. res->t_logits = cur;
  4796. ggml_build_forward_expand(gf, cur);
  4797. }
  4798. };
  4799. struct llm_build_refact : public llm_graph_context {
  4800. llm_build_refact(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4801. const int64_t n_embd_head = hparams.n_embd_head_v;
  4802. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4803. ggml_tensor * cur;
  4804. ggml_tensor * inpL;
  4805. inpL = build_inp_embd(model.tok_embd);
  4806. auto * inp_attn = build_attn_inp_kv_unified();
  4807. for (int il = 0; il < n_layer; ++il) {
  4808. ggml_tensor * inpSA = inpL;
  4809. cur = build_norm(inpL,
  4810. model.layers[il].attn_norm, NULL,
  4811. LLM_NORM_RMS, il);
  4812. cb(cur, "attn_norm", il);
  4813. // self-attention
  4814. {
  4815. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4816. cb(Qcur, "Qcur", il);
  4817. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4818. cb(Kcur, "Kcur", il);
  4819. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4820. cb(Vcur, "Vcur", il);
  4821. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4822. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4823. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4824. cb(Qcur, "Qcur", il);
  4825. cb(Kcur, "Kcur", il);
  4826. cb(Vcur, "Vcur", il);
  4827. cur = build_attn(inp_attn, gf,
  4828. model.layers[il].wo, NULL,
  4829. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4830. }
  4831. if (il == n_layer - 1) {
  4832. // skip computing output for unused tokens
  4833. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4834. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4835. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4836. }
  4837. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4838. cb(ffn_inp, "ffn_inp", il);
  4839. // feed-forward network
  4840. {
  4841. cur = build_norm(ffn_inp,
  4842. model.layers[il].ffn_norm, NULL,
  4843. LLM_NORM_RMS, il);
  4844. cb(cur, "ffn_norm", il);
  4845. cur = build_ffn(cur,
  4846. model.layers[il].ffn_up, NULL, NULL,
  4847. model.layers[il].ffn_gate, NULL, NULL,
  4848. model.layers[il].ffn_down, NULL, NULL,
  4849. NULL,
  4850. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4851. cb(cur, "ffn_out", il);
  4852. }
  4853. cur = ggml_add(ctx0, cur, ffn_inp);
  4854. cur = build_cvec(cur, il);
  4855. cb(cur, "l_out", il);
  4856. // input for next layer
  4857. inpL = cur;
  4858. }
  4859. cur = inpL;
  4860. cur = build_norm(cur,
  4861. model.output_norm, NULL,
  4862. LLM_NORM_RMS, -1);
  4863. cb(cur, "result_norm", -1);
  4864. res->t_embd = cur;
  4865. // lm_head
  4866. cur = build_lora_mm(model.output, cur);
  4867. cb(cur, "result_output", -1);
  4868. res->t_logits = cur;
  4869. ggml_build_forward_expand(gf, cur);
  4870. }
  4871. };
  4872. struct llm_build_bert : public llm_graph_context {
  4873. llm_build_bert(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4874. const int64_t n_embd_head = hparams.n_embd_head_v;
  4875. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4876. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4877. ggml_tensor * cur;
  4878. ggml_tensor * inpL;
  4879. ggml_tensor * inp_pos = nullptr;
  4880. if (model.arch != LLM_ARCH_JINA_BERT_V2) {
  4881. inp_pos = build_inp_pos();
  4882. }
  4883. // construct input embeddings (token, type, position)
  4884. inpL = build_inp_embd(model.tok_embd);
  4885. // token types are hardcoded to zero ("Sentence A")
  4886. if (model.type_embd) {
  4887. ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
  4888. inpL = ggml_add(ctx0, inpL, type_row0);
  4889. }
  4890. if (model.arch == LLM_ARCH_BERT) {
  4891. inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL);
  4892. }
  4893. cb(inpL, "inp_embd", -1);
  4894. // embed layer norm
  4895. inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
  4896. cb(inpL, "inp_norm", -1);
  4897. auto * inp_attn = build_attn_inp_no_cache();
  4898. // iterate layers
  4899. for (int il = 0; il < n_layer; ++il) {
  4900. ggml_tensor * cur = inpL;
  4901. ggml_tensor * Qcur;
  4902. ggml_tensor * Kcur;
  4903. ggml_tensor * Vcur;
  4904. // self-attention
  4905. if (model.layers[il].wqkv) {
  4906. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4907. cb(cur, "wqkv", il);
  4908. if (model.layers[il].bqkv) {
  4909. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4910. cb(cur, "bqkv", il);
  4911. }
  4912. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4913. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4914. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4915. } else {
  4916. Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq);
  4917. Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk);
  4918. Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv);
  4919. }
  4920. if (model.layers[il].attn_q_norm) {
  4921. Qcur = build_norm(Qcur,
  4922. model.layers[il].attn_q_norm,
  4923. model.layers[il].attn_q_norm_b,
  4924. LLM_NORM, il);
  4925. }
  4926. if (model.layers[il].attn_k_norm) {
  4927. Kcur = build_norm(Kcur,
  4928. model.layers[il].attn_k_norm,
  4929. model.layers[il].attn_k_norm_b,
  4930. LLM_NORM, il);
  4931. }
  4932. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4933. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4934. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4935. // RoPE
  4936. if (model.arch == LLM_ARCH_NOMIC_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) {
  4937. Qcur = ggml_rope_ext(
  4938. ctx0, Qcur, inp_pos, nullptr,
  4939. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4940. ext_factor, attn_factor, beta_fast, beta_slow
  4941. );
  4942. Kcur = ggml_rope_ext(
  4943. ctx0, Kcur, inp_pos, nullptr,
  4944. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4945. ext_factor, attn_factor, beta_fast, beta_slow
  4946. );
  4947. }
  4948. cb(Qcur, "Qcur", il);
  4949. cb(Kcur, "Kcur", il);
  4950. cb(Vcur, "Vcur", il);
  4951. cur = build_attn(inp_attn, gf,
  4952. model.layers[il].wo, model.layers[il].bo,
  4953. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4954. cb(cur, "kqv_out", il);
  4955. if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
  4956. // skip computing output for unused tokens
  4957. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4958. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4959. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  4960. }
  4961. // re-add the layer input
  4962. cur = ggml_add(ctx0, cur, inpL);
  4963. // attention layer norm
  4964. cur = build_norm(cur, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, il);
  4965. if (model.layers[il].attn_norm_2 != nullptr) {
  4966. cur = ggml_add(ctx0, cur, inpL); // re-add the layer input
  4967. cur = build_norm(cur, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, il);
  4968. }
  4969. ggml_tensor * ffn_inp = cur;
  4970. cb(ffn_inp, "ffn_inp", il);
  4971. // feed-forward network
  4972. if (hparams.moe_every_n_layers > 0 && il % hparams.moe_every_n_layers == 1) {
  4973. // MoE branch
  4974. cur = build_moe_ffn(cur,
  4975. model.layers[il].ffn_gate_inp,
  4976. model.layers[il].ffn_up_exps,
  4977. nullptr,
  4978. model.layers[il].ffn_down_exps,
  4979. nullptr,
  4980. hparams.n_expert,
  4981. hparams.n_expert_used,
  4982. LLM_FFN_GELU,
  4983. false, false,
  4984. 0.0f,
  4985. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, il);
  4986. cb(cur, "ffn_moe_out", il);
  4987. } else if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) {
  4988. cur = build_ffn(cur,
  4989. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  4990. NULL, NULL, NULL,
  4991. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4992. NULL,
  4993. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  4994. cb(cur, "ffn_out", il);
  4995. } else if (model.arch == LLM_ARCH_JINA_BERT_V2) {
  4996. cur = build_ffn(cur,
  4997. model.layers[il].ffn_up, NULL, NULL,
  4998. model.layers[il].ffn_gate, NULL, NULL,
  4999. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  5000. NULL,
  5001. model.layers[il].ffn_gate ? LLM_FFN_GELU : LLM_FFN_GEGLU, LLM_FFN_PAR, il);
  5002. cb(cur, "ffn_out", il);
  5003. } else {
  5004. cur = build_ffn(cur,
  5005. model.layers[il].ffn_up, NULL, NULL,
  5006. model.layers[il].ffn_gate, NULL, NULL,
  5007. model.layers[il].ffn_down, NULL, NULL,
  5008. NULL,
  5009. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5010. cb(cur, "ffn_out", il);
  5011. }
  5012. // attentions bypass the intermediate layer
  5013. cur = ggml_add(ctx0, cur, ffn_inp);
  5014. // output layer norm
  5015. cur = build_norm(cur, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, il);
  5016. // input for next layer
  5017. inpL = cur;
  5018. }
  5019. cur = inpL;
  5020. cb(cur, "result_embd", -1);
  5021. res->t_embd = cur;
  5022. ggml_build_forward_expand(gf, cur);
  5023. }
  5024. };
  5025. struct llm_build_bloom : public llm_graph_context {
  5026. llm_build_bloom(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5027. const int64_t n_embd_head = hparams.n_embd_head_v;
  5028. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5029. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5030. ggml_tensor * cur;
  5031. ggml_tensor * inpL;
  5032. inpL = build_inp_embd(model.tok_embd);
  5033. auto * inp_attn = build_attn_inp_kv_unified();
  5034. inpL = build_norm(inpL,
  5035. model.tok_norm,
  5036. model.tok_norm_b,
  5037. LLM_NORM, -1);
  5038. cb(inpL, "inp_norm", -1);
  5039. for (int il = 0; il < n_layer; ++il) {
  5040. cur = build_norm(inpL,
  5041. model.layers[il].attn_norm,
  5042. model.layers[il].attn_norm_b,
  5043. LLM_NORM, il);
  5044. cb(cur, "attn_norm", il);
  5045. // self-attention
  5046. {
  5047. cur = build_lora_mm(model.layers[il].wqkv, cur);
  5048. cb(cur, "wqkv", il);
  5049. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5050. cb(cur, "bqkv", il);
  5051. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5052. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5053. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5054. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5055. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5056. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5057. cb(Qcur, "Qcur", il);
  5058. cb(Kcur, "Kcur", il);
  5059. cb(Vcur, "Vcur", il);
  5060. cur = build_attn(inp_attn, gf,
  5061. model.layers[il].wo, model.layers[il].bo,
  5062. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5063. }
  5064. if (il == n_layer - 1) {
  5065. // skip computing output for unused tokens
  5066. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5067. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5068. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  5069. }
  5070. // Add the input
  5071. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5072. cb(ffn_inp, "ffn_inp", il);
  5073. // FF
  5074. {
  5075. cur = build_norm(ffn_inp,
  5076. model.layers[il].ffn_norm,
  5077. model.layers[il].ffn_norm_b,
  5078. LLM_NORM, il);
  5079. cb(cur, "ffn_norm", il);
  5080. cur = build_ffn(cur,
  5081. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  5082. NULL, NULL, NULL,
  5083. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  5084. NULL,
  5085. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  5086. cb(cur, "ffn_out", il);
  5087. }
  5088. cur = ggml_add(ctx0, cur, ffn_inp);
  5089. cur = build_cvec(cur, il);
  5090. cb(cur, "l_out", il);
  5091. // input for next layer
  5092. inpL = cur;
  5093. }
  5094. cur = build_norm(inpL,
  5095. model.output_norm,
  5096. model.output_norm_b,
  5097. LLM_NORM, -1);
  5098. cb(cur, "result_norm", -1);
  5099. res->t_embd = cur;
  5100. cur = build_lora_mm(model.output, cur);
  5101. cb(cur, "result_output", -1);
  5102. res->t_logits = cur;
  5103. ggml_build_forward_expand(gf, cur);
  5104. }
  5105. };
  5106. struct llm_build_mpt : public llm_graph_context {
  5107. llm_build_mpt(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5108. const int64_t n_embd_head = hparams.n_embd_head_v;
  5109. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5110. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5111. ggml_tensor * cur;
  5112. ggml_tensor * pos;
  5113. ggml_tensor * inpL;
  5114. inpL = build_inp_embd(model.tok_embd);
  5115. auto * inp_attn = build_attn_inp_kv_unified();
  5116. if (model.pos_embd) {
  5117. // inp_pos - contains the positions
  5118. ggml_tensor * inp_pos = build_inp_pos();
  5119. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  5120. cb(pos, "pos_embd", -1);
  5121. inpL = ggml_add(ctx0, inpL, pos);
  5122. cb(inpL, "inpL", -1);
  5123. }
  5124. for (int il = 0; il < n_layer; ++il) {
  5125. ggml_tensor * attn_norm;
  5126. attn_norm = build_norm(inpL,
  5127. model.layers[il].attn_norm,
  5128. model.layers[il].attn_norm_b,
  5129. LLM_NORM, il);
  5130. cb(attn_norm, "attn_norm", il);
  5131. // self-attention
  5132. {
  5133. cur = attn_norm;
  5134. cur = build_lora_mm(model.layers[il].wqkv, cur);
  5135. cb(cur, "wqkv", il);
  5136. if (model.layers[il].bqkv){
  5137. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5138. cb(cur, "bqkv", il);
  5139. }
  5140. if (hparams.f_clamp_kqv > 0.0f) {
  5141. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  5142. cb(cur, "wqkv_clamped", il);
  5143. }
  5144. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5145. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5146. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5147. cb(Qcur, "Qcur", il);
  5148. cb(Kcur, "Kcur", il);
  5149. cb(Vcur, "Vcur", il);
  5150. // Q/K Layernorm
  5151. if (model.layers[il].attn_q_norm) {
  5152. Qcur = build_norm(Qcur,
  5153. model.layers[il].attn_q_norm,
  5154. model.layers[il].attn_q_norm_b,
  5155. LLM_NORM, il);
  5156. cb(Qcur, "Qcur", il);
  5157. Kcur = build_norm(Kcur,
  5158. model.layers[il].attn_k_norm,
  5159. model.layers[il].attn_k_norm_b,
  5160. LLM_NORM, il);
  5161. cb(Kcur, "Kcur", il);
  5162. }
  5163. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5164. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5165. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5166. cb(Qcur, "Qcur", il);
  5167. cb(Kcur, "Kcur", il);
  5168. cb(Vcur, "Vcur", il);
  5169. cur = build_attn(inp_attn, gf,
  5170. model.layers[il].wo, model.layers[il].bo,
  5171. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5172. }
  5173. if (il == n_layer - 1) {
  5174. // skip computing output for unused tokens
  5175. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5176. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5177. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  5178. }
  5179. // Add the input
  5180. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5181. cb(ffn_inp, "ffn_inp", il);
  5182. // feed forward
  5183. {
  5184. cur = build_norm(ffn_inp,
  5185. model.layers[il].ffn_norm,
  5186. model.layers[il].ffn_norm_b,
  5187. LLM_NORM, il);
  5188. cb(cur, "ffn_norm", il);
  5189. cur = build_ffn(cur,
  5190. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  5191. NULL, NULL, NULL,
  5192. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  5193. model.layers[il].ffn_act,
  5194. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  5195. cb(cur, "ffn_out", il);
  5196. }
  5197. cur = ggml_add(ctx0, cur, ffn_inp);
  5198. cur = build_cvec(cur, il);
  5199. cb(cur, "l_out", il);
  5200. // input for next layer
  5201. inpL = cur;
  5202. }
  5203. cur = inpL;
  5204. cur = build_norm(cur,
  5205. model.output_norm,
  5206. model.output_norm_b,
  5207. LLM_NORM, -1);
  5208. cb(cur, "result_norm", -1);
  5209. res->t_embd = cur;
  5210. cur = build_lora_mm(model.output, cur);
  5211. cb(cur, "result_output", -1);
  5212. res->t_logits = cur;
  5213. ggml_build_forward_expand(gf, cur);
  5214. }
  5215. };
  5216. struct llm_build_stablelm : public llm_graph_context {
  5217. llm_build_stablelm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5218. const int64_t n_embd_head = hparams.n_embd_head_v;
  5219. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5220. ggml_tensor * cur;
  5221. ggml_tensor * inpL;
  5222. inpL = build_inp_embd(model.tok_embd);
  5223. // inp_pos - contains the positions
  5224. ggml_tensor * inp_pos = build_inp_pos();
  5225. auto * inp_attn = build_attn_inp_kv_unified();
  5226. for (int il = 0; il < n_layer; ++il) {
  5227. // norm
  5228. cur = build_norm(inpL,
  5229. model.layers[il].attn_norm,
  5230. model.layers[il].attn_norm_b,
  5231. LLM_NORM, il);
  5232. cb(cur, "attn_norm", il);
  5233. ggml_tensor * inpSA = cur;
  5234. // self-attention
  5235. {
  5236. // compute Q and K and RoPE them
  5237. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5238. cb(Qcur, "Qcur", il);
  5239. if (model.layers[il].bq) {
  5240. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5241. cb(Qcur, "Qcur", il);
  5242. }
  5243. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5244. cb(Kcur, "Kcur", il);
  5245. if (model.layers[il].bk) {
  5246. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5247. cb(Kcur, "Kcur", il);
  5248. }
  5249. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5250. cb(Vcur, "Vcur", il);
  5251. if (model.layers[il].bv) {
  5252. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5253. cb(Vcur, "Vcur", il);
  5254. }
  5255. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5256. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5257. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5258. if (model.layers[il].attn_q_norm) {
  5259. Qcur = build_norm(Qcur,
  5260. model.layers[il].attn_q_norm,
  5261. NULL,
  5262. LLM_NORM, il);
  5263. cb(Qcur, "Qcur", il);
  5264. }
  5265. if (model.layers[il].attn_k_norm) {
  5266. Kcur = build_norm(Kcur,
  5267. model.layers[il].attn_k_norm,
  5268. NULL,
  5269. LLM_NORM, il);
  5270. cb(Kcur, "Kcur", il);
  5271. }
  5272. Qcur = ggml_rope_ext(
  5273. ctx0, Qcur, inp_pos, nullptr,
  5274. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5275. ext_factor, attn_factor, beta_fast, beta_slow
  5276. );
  5277. Kcur = ggml_rope_ext(
  5278. ctx0, Kcur, inp_pos, nullptr,
  5279. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5280. ext_factor, attn_factor, beta_fast, beta_slow
  5281. );
  5282. cb(Qcur, "Qcur", il);
  5283. cb(Kcur, "Kcur", il);
  5284. cb(Vcur, "Vcur", il);
  5285. cur = build_attn(inp_attn, gf,
  5286. model.layers[il].wo, NULL,
  5287. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5288. }
  5289. if (il == n_layer - 1) {
  5290. // skip computing output for unused tokens
  5291. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5292. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5293. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  5294. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5295. }
  5296. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5297. cb(ffn_inp, "ffn_inp", il);
  5298. // feed-forward network
  5299. {
  5300. if (model.layers[il].ffn_norm) {
  5301. cur = build_norm(ffn_inp,
  5302. model.layers[il].ffn_norm,
  5303. model.layers[il].ffn_norm_b,
  5304. LLM_NORM, il);
  5305. cb(cur, "ffn_norm", il);
  5306. } else {
  5307. // parallel residual
  5308. cur = inpSA;
  5309. }
  5310. cur = build_ffn(cur,
  5311. model.layers[il].ffn_up, NULL, NULL,
  5312. model.layers[il].ffn_gate, NULL, NULL,
  5313. model.layers[il].ffn_down, NULL, NULL,
  5314. NULL,
  5315. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5316. cb(cur, "ffn_out", il);
  5317. }
  5318. cur = ggml_add(ctx0, cur, ffn_inp);
  5319. cur = build_cvec(cur, il);
  5320. cb(cur, "l_out", il);
  5321. // input for next layer
  5322. inpL = cur;
  5323. }
  5324. cur = inpL;
  5325. cur = build_norm(cur,
  5326. model.output_norm,
  5327. model.output_norm_b,
  5328. LLM_NORM, -1);
  5329. cb(cur, "result_norm", -1);
  5330. res->t_embd = cur;
  5331. // lm_head
  5332. cur = build_lora_mm(model.output, cur);
  5333. cb(cur, "result_output", -1);
  5334. res->t_logits = cur;
  5335. ggml_build_forward_expand(gf, cur);
  5336. }
  5337. };
  5338. struct llm_build_qwen : public llm_graph_context {
  5339. llm_build_qwen(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5340. const int64_t n_embd_head = hparams.n_embd_head_v;
  5341. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5342. ggml_tensor * cur;
  5343. ggml_tensor * inpL;
  5344. inpL = build_inp_embd(model.tok_embd);
  5345. // inp_pos - contains the positions
  5346. ggml_tensor * inp_pos = build_inp_pos();
  5347. auto * inp_attn = build_attn_inp_kv_unified();
  5348. for (int il = 0; il < n_layer; ++il) {
  5349. ggml_tensor * inpSA = inpL;
  5350. cur = build_norm(inpL,
  5351. model.layers[il].attn_norm, NULL,
  5352. LLM_NORM_RMS, il);
  5353. cb(cur, "attn_norm", il);
  5354. // self-attention
  5355. {
  5356. cur = build_lora_mm(model.layers[il].wqkv, cur);
  5357. cb(cur, "wqkv", il);
  5358. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5359. cb(cur, "bqkv", il);
  5360. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5361. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5362. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
  5363. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5364. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5365. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5366. // using mode = 2 for neox mode
  5367. Qcur = ggml_rope_ext(
  5368. ctx0, Qcur, inp_pos, nullptr,
  5369. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5370. ext_factor, attn_factor, beta_fast, beta_slow
  5371. );
  5372. Kcur = ggml_rope_ext(
  5373. ctx0, Kcur, inp_pos, nullptr,
  5374. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5375. ext_factor, attn_factor, beta_fast, beta_slow
  5376. );
  5377. cb(Qcur, "Qcur", il);
  5378. cb(Kcur, "Kcur", il);
  5379. cb(Vcur, "Vcur", il);
  5380. cur = build_attn(inp_attn, gf,
  5381. model.layers[il].wo, NULL,
  5382. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5383. }
  5384. if (il == n_layer - 1) {
  5385. // skip computing output for unused tokens
  5386. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5387. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5388. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5389. }
  5390. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5391. cb(ffn_inp, "ffn_inp", il);
  5392. // feed-forward forward
  5393. {
  5394. cur = build_norm(ffn_inp,
  5395. model.layers[il].ffn_norm, NULL,
  5396. LLM_NORM_RMS, il);
  5397. cb(cur, "ffn_norm", il);
  5398. cur = build_ffn(cur,
  5399. model.layers[il].ffn_up, NULL, NULL,
  5400. model.layers[il].ffn_gate, NULL, NULL,
  5401. model.layers[il].ffn_down, NULL, NULL,
  5402. NULL,
  5403. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5404. cb(cur, "ffn_out", il);
  5405. }
  5406. cur = ggml_add(ctx0, cur, ffn_inp);
  5407. cur = build_cvec(cur, il);
  5408. cb(cur, "l_out", il);
  5409. // input for next layer
  5410. inpL = cur;
  5411. }
  5412. cur = inpL;
  5413. cur = build_norm(cur,
  5414. model.output_norm, NULL,
  5415. LLM_NORM_RMS, -1);
  5416. cb(cur, "result_norm", -1);
  5417. res->t_embd = cur;
  5418. // lm_head
  5419. cur = build_lora_mm(model.output, cur);
  5420. cb(cur, "result_output", -1);
  5421. res->t_logits = cur;
  5422. ggml_build_forward_expand(gf, cur);
  5423. }
  5424. };
  5425. struct llm_build_qwen2 : public llm_graph_context {
  5426. llm_build_qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5427. const int64_t n_embd_head = hparams.n_embd_head_v;
  5428. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5429. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5430. ggml_tensor * cur;
  5431. ggml_tensor * inpL;
  5432. inpL = build_inp_embd(model.tok_embd);
  5433. // inp_pos - contains the positions
  5434. ggml_tensor * inp_pos = build_inp_pos();
  5435. auto * inp_attn = build_attn_inp_kv_unified();
  5436. for (int il = 0; il < n_layer; ++il) {
  5437. ggml_tensor * inpSA = inpL;
  5438. // norm
  5439. cur = build_norm(inpL,
  5440. model.layers[il].attn_norm, NULL,
  5441. LLM_NORM_RMS, il);
  5442. cb(cur, "attn_norm", il);
  5443. // self-attention
  5444. {
  5445. // compute Q and K and RoPE them
  5446. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5447. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5448. cb(Qcur, "Qcur", il);
  5449. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5450. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5451. cb(Kcur, "Kcur", il);
  5452. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5453. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5454. cb(Vcur, "Vcur", il);
  5455. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5456. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5457. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5458. Qcur = ggml_rope_ext(
  5459. ctx0, Qcur, inp_pos, nullptr,
  5460. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5461. ext_factor, attn_factor, beta_fast, beta_slow
  5462. );
  5463. Kcur = ggml_rope_ext(
  5464. ctx0, Kcur, inp_pos, nullptr,
  5465. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5466. ext_factor, attn_factor, beta_fast, beta_slow
  5467. );
  5468. cb(Qcur, "Qcur", il);
  5469. cb(Kcur, "Kcur", il);
  5470. cb(Vcur, "Vcur", il);
  5471. cur = build_attn(inp_attn, gf,
  5472. model.layers[il].wo, model.layers[il].bo,
  5473. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5474. }
  5475. if (il == n_layer - 1) {
  5476. // skip computing output for unused tokens
  5477. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5478. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5479. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5480. }
  5481. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5482. cb(ffn_inp, "ffn_inp", il);
  5483. // feed-forward network
  5484. cur = build_norm(ffn_inp,
  5485. model.layers[il].ffn_norm, NULL,
  5486. LLM_NORM_RMS, il);
  5487. cb(cur, "ffn_norm", il);
  5488. cur = build_ffn(cur,
  5489. model.layers[il].ffn_up, NULL, NULL,
  5490. model.layers[il].ffn_gate, NULL, NULL,
  5491. model.layers[il].ffn_down, NULL, NULL,
  5492. NULL,
  5493. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5494. cb(cur, "ffn_out", il);
  5495. cur = ggml_add(ctx0, cur, ffn_inp);
  5496. cur = build_cvec(cur, il);
  5497. cb(cur, "l_out", il);
  5498. // input for next layer
  5499. inpL = cur;
  5500. }
  5501. cur = inpL;
  5502. cur = build_norm(cur,
  5503. model.output_norm, NULL,
  5504. LLM_NORM_RMS, -1);
  5505. cb(cur, "result_norm", -1);
  5506. res->t_embd = cur;
  5507. // lm_head
  5508. cur = build_lora_mm(model.output, cur);
  5509. cb(cur, "result_output", -1);
  5510. res->t_logits = cur;
  5511. ggml_build_forward_expand(gf, cur);
  5512. }
  5513. };
  5514. struct llm_build_qwen2vl : public llm_graph_context {
  5515. llm_build_qwen2vl(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5516. const int64_t n_embd_head = hparams.n_embd_head_v;
  5517. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5518. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5519. ggml_tensor * cur;
  5520. ggml_tensor * inpL;
  5521. inpL = build_inp_embd(model.tok_embd);
  5522. // inp_pos - contains the positions
  5523. ggml_tensor * inp_pos = build_inp_pos();
  5524. auto * inp_attn = build_attn_inp_kv_unified();
  5525. int sections[4];
  5526. std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections);
  5527. for (int il = 0; il < n_layer; ++il) {
  5528. ggml_tensor * inpSA = inpL;
  5529. // norm
  5530. cur = build_norm(inpL,
  5531. model.layers[il].attn_norm, NULL,
  5532. LLM_NORM_RMS, il);
  5533. cb(cur, "attn_norm", il);
  5534. // self-attention
  5535. {
  5536. // compute Q and K and RoPE them
  5537. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5538. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5539. cb(Qcur, "Qcur", il);
  5540. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5541. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5542. cb(Kcur, "Kcur", il);
  5543. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5544. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5545. cb(Vcur, "Vcur", il);
  5546. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5547. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5548. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5549. Qcur = ggml_rope_multi(
  5550. ctx0, Qcur, inp_pos, nullptr,
  5551. n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
  5552. ext_factor, attn_factor, beta_fast, beta_slow
  5553. );
  5554. Kcur = ggml_rope_multi(
  5555. ctx0, Kcur, inp_pos, nullptr,
  5556. n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
  5557. ext_factor, attn_factor, beta_fast, beta_slow
  5558. );
  5559. cb(Qcur, "Qcur", il);
  5560. cb(Kcur, "Kcur", il);
  5561. cb(Vcur, "Vcur", il);
  5562. cur = build_attn(inp_attn, gf,
  5563. model.layers[il].wo, model.layers[il].bo,
  5564. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5565. }
  5566. if (il == n_layer - 1) {
  5567. // skip computing output for unused tokens
  5568. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5569. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5570. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5571. }
  5572. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5573. cb(ffn_inp, "ffn_inp", il);
  5574. // feed-forward network
  5575. cur = build_norm(ffn_inp,
  5576. model.layers[il].ffn_norm, NULL,
  5577. LLM_NORM_RMS, il);
  5578. cb(cur, "ffn_norm", il);
  5579. cur = build_ffn(cur,
  5580. model.layers[il].ffn_up, NULL, NULL,
  5581. model.layers[il].ffn_gate, NULL, NULL,
  5582. model.layers[il].ffn_down, NULL, NULL,
  5583. NULL,
  5584. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5585. cb(cur, "ffn_out", il);
  5586. cur = ggml_add(ctx0, cur, ffn_inp);
  5587. cur = build_cvec(cur, il);
  5588. cb(cur, "l_out", il);
  5589. // input for next layer
  5590. inpL = cur;
  5591. }
  5592. cur = inpL;
  5593. cur = build_norm(cur,
  5594. model.output_norm, NULL,
  5595. LLM_NORM_RMS, -1);
  5596. cb(cur, "result_norm", -1);
  5597. res->t_embd = cur;
  5598. // lm_head
  5599. cur = build_lora_mm(model.output, cur);
  5600. cb(cur, "result_output", -1);
  5601. res->t_logits = cur;
  5602. ggml_build_forward_expand(gf, cur);
  5603. }
  5604. };
  5605. struct llm_build_qwen2moe : public llm_graph_context {
  5606. llm_build_qwen2moe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5607. const int64_t n_embd_head = hparams.n_embd_head_v;
  5608. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5609. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5610. ggml_tensor * cur;
  5611. ggml_tensor * inpL;
  5612. inpL = build_inp_embd(model.tok_embd);
  5613. // inp_pos - contains the positions
  5614. ggml_tensor * inp_pos = build_inp_pos();
  5615. auto * inp_attn = build_attn_inp_kv_unified();
  5616. for (int il = 0; il < n_layer; ++il) {
  5617. ggml_tensor * inpSA = inpL;
  5618. // norm
  5619. cur = build_norm(inpL,
  5620. model.layers[il].attn_norm, NULL,
  5621. LLM_NORM_RMS, il);
  5622. cb(cur, "attn_norm", il);
  5623. // self_attention
  5624. {
  5625. // compute Q and K and RoPE them
  5626. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5627. cb(Qcur, "Qcur", il);
  5628. if (model.layers[il].bq) {
  5629. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5630. cb(Qcur, "Qcur", il);
  5631. }
  5632. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5633. cb(Kcur, "Kcur", il);
  5634. if (model.layers[il].bk) {
  5635. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5636. cb(Kcur, "Kcur", il);
  5637. }
  5638. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5639. cb(Vcur, "Vcur", il);
  5640. if (model.layers[il].bv) {
  5641. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5642. cb(Vcur, "Vcur", il);
  5643. }
  5644. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5645. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5646. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5647. Qcur = ggml_rope_ext(
  5648. ctx0, Qcur, inp_pos, nullptr,
  5649. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5650. ext_factor, attn_factor, beta_fast, beta_slow
  5651. );
  5652. Kcur = ggml_rope_ext(
  5653. ctx0, Kcur, inp_pos, nullptr,
  5654. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5655. ext_factor, attn_factor, beta_fast, beta_slow
  5656. );
  5657. cb(Qcur, "Qcur", il);
  5658. cb(Kcur, "Kcur", il);
  5659. cb(Vcur, "Vcur", il);
  5660. cur = build_attn(inp_attn, gf,
  5661. model.layers[il].wo, model.layers[il].bo,
  5662. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5663. }
  5664. if (il == n_layer - 1) {
  5665. // skip computing output for unused tokens
  5666. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5667. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5668. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5669. }
  5670. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5671. cb(ffn_inp, "ffn_inp", il);
  5672. // MoE branch
  5673. cur = build_norm(ffn_inp,
  5674. model.layers[il].ffn_norm, NULL,
  5675. LLM_NORM_RMS, il);
  5676. cb(cur, "ffn_norm", il);
  5677. ggml_tensor * moe_out =
  5678. build_moe_ffn(cur,
  5679. model.layers[il].ffn_gate_inp,
  5680. model.layers[il].ffn_up_exps,
  5681. model.layers[il].ffn_gate_exps,
  5682. model.layers[il].ffn_down_exps,
  5683. nullptr,
  5684. n_expert, n_expert_used,
  5685. LLM_FFN_SILU, false,
  5686. false, 0.0,
  5687. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  5688. il);
  5689. cb(moe_out, "ffn_moe_out", il);
  5690. // FFN shared expert
  5691. {
  5692. ggml_tensor * cur_gate_inp = build_lora_mm(model.layers[il].ffn_gate_inp_shexp, cur);
  5693. cb(cur_gate_inp, "ffn_shexp_gate_inp", il);
  5694. // sigmoid
  5695. ggml_tensor * cur_gate = ggml_div(ctx0, ggml_silu(ctx0, cur_gate_inp), cur_gate_inp);
  5696. cb(cur_gate, "ffn_shexp_gate", il);
  5697. ggml_tensor * cur_ffn = build_ffn(cur,
  5698. model.layers[il].ffn_up_shexp, NULL, NULL,
  5699. model.layers[il].ffn_gate_shexp, NULL, NULL,
  5700. model.layers[il].ffn_down_shexp, NULL, NULL,
  5701. NULL,
  5702. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5703. cb(cur_ffn, "ffn_shexp", il);
  5704. ggml_tensor * ffn_shexp_out = ggml_mul(ctx0, cur_ffn, cur_gate);
  5705. cb(ffn_shexp_out, "ffn_shexp_out", il);
  5706. moe_out = ggml_add(ctx0, moe_out, ffn_shexp_out);
  5707. cb(moe_out, "ffn_out", il);
  5708. cur = moe_out;
  5709. }
  5710. cur = ggml_add(ctx0, cur, ffn_inp);
  5711. cur = build_cvec(cur, il);
  5712. cb(cur, "l_out", il);
  5713. // input for next layer
  5714. inpL = cur;
  5715. }
  5716. cur = inpL;
  5717. cur = build_norm(cur,
  5718. model.output_norm, NULL,
  5719. LLM_NORM_RMS, -1);
  5720. cb(cur, "result_norm", -1);
  5721. res->t_embd = cur;
  5722. // lm_head
  5723. cur = build_lora_mm(model.output, cur);
  5724. cb(cur, "result_output", -1);
  5725. res->t_logits = cur;
  5726. ggml_build_forward_expand(gf, cur);
  5727. }
  5728. };
  5729. struct llm_build_qwen3 : public llm_graph_context {
  5730. llm_build_qwen3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5731. const int64_t n_embd_head = hparams.n_embd_head_v;
  5732. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5733. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5734. ggml_tensor * cur;
  5735. ggml_tensor * inpL;
  5736. inpL = build_inp_embd(model.tok_embd);
  5737. // inp_pos - contains the positions
  5738. ggml_tensor * inp_pos = build_inp_pos();
  5739. auto * inp_attn = build_attn_inp_kv_unified();
  5740. for (int il = 0; il < n_layer; ++il) {
  5741. ggml_tensor * inpSA = inpL;
  5742. // norm
  5743. cur = build_norm(inpL,
  5744. model.layers[il].attn_norm, NULL,
  5745. LLM_NORM_RMS, il);
  5746. cb(cur, "attn_norm", il);
  5747. // self-attention
  5748. {
  5749. // compute Q and K and RoPE them
  5750. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5751. cb(Qcur, "Qcur", il);
  5752. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5753. cb(Kcur, "Kcur", il);
  5754. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5755. cb(Vcur, "Vcur", il);
  5756. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5757. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5758. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5759. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
  5760. cb(Qcur, "Qcur_normed", il);
  5761. Qcur = ggml_rope_ext(
  5762. ctx0, Qcur, inp_pos, nullptr,
  5763. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5764. ext_factor, attn_factor, beta_fast, beta_slow
  5765. );
  5766. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
  5767. cb(Kcur, "Kcur_normed", il);
  5768. Kcur = ggml_rope_ext(
  5769. ctx0, Kcur, inp_pos, nullptr,
  5770. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5771. ext_factor, attn_factor, beta_fast, beta_slow
  5772. );
  5773. cb(Qcur, "Qcur", il);
  5774. cb(Kcur, "Kcur", il);
  5775. cb(Vcur, "Vcur", il);
  5776. cur = build_attn(inp_attn, gf,
  5777. model.layers[il].wo, model.layers[il].bo,
  5778. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5779. }
  5780. if (il == n_layer - 1) {
  5781. // skip computing output for unused tokens
  5782. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5783. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5784. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5785. }
  5786. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5787. cb(ffn_inp, "ffn_inp", il);
  5788. // feed-forward network
  5789. cur = build_norm(ffn_inp,
  5790. model.layers[il].ffn_norm, NULL,
  5791. LLM_NORM_RMS, il);
  5792. cb(cur, "ffn_norm", il);
  5793. cur = build_ffn(cur,
  5794. model.layers[il].ffn_up, NULL, NULL,
  5795. model.layers[il].ffn_gate, NULL, NULL,
  5796. model.layers[il].ffn_down, NULL, NULL,
  5797. NULL,
  5798. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5799. cb(cur, "ffn_out", il);
  5800. cur = ggml_add(ctx0, cur, ffn_inp);
  5801. cur = build_cvec(cur, il);
  5802. cb(cur, "l_out", il);
  5803. // input for next layer
  5804. inpL = cur;
  5805. }
  5806. cur = inpL;
  5807. cur = build_norm(cur,
  5808. model.output_norm, NULL,
  5809. LLM_NORM_RMS, -1);
  5810. cb(cur, "result_norm", -1);
  5811. res->t_embd = cur;
  5812. // lm_head
  5813. cur = build_lora_mm(model.output, cur);
  5814. cb(cur, "result_output", -1);
  5815. res->t_logits = cur;
  5816. ggml_build_forward_expand(gf, cur);
  5817. }
  5818. };
  5819. struct llm_build_qwen3moe : public llm_graph_context {
  5820. llm_build_qwen3moe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5821. const int64_t n_embd_head = hparams.n_embd_head_v;
  5822. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5823. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5824. ggml_tensor * cur;
  5825. ggml_tensor * inpL;
  5826. inpL = build_inp_embd(model.tok_embd);
  5827. // inp_pos - contains the positions
  5828. ggml_tensor * inp_pos = build_inp_pos();
  5829. auto * inp_attn = build_attn_inp_kv_unified();
  5830. for (int il = 0; il < n_layer; ++il) {
  5831. ggml_tensor * inpSA = inpL;
  5832. // norm
  5833. cur = build_norm(inpL,
  5834. model.layers[il].attn_norm, NULL,
  5835. LLM_NORM_RMS, il);
  5836. cb(cur, "attn_norm", il);
  5837. // self_attention
  5838. {
  5839. // compute Q and K and RoPE them
  5840. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5841. cb(Qcur, "Qcur", il);
  5842. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5843. cb(Kcur, "Kcur", il);
  5844. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5845. cb(Vcur, "Vcur", il);
  5846. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5847. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5848. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5849. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
  5850. cb(Qcur, "Qcur_normed", il);
  5851. Qcur = ggml_rope_ext(
  5852. ctx0, Qcur, inp_pos, nullptr,
  5853. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5854. ext_factor, attn_factor, beta_fast, beta_slow
  5855. );
  5856. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
  5857. cb(Kcur, "Kcur_normed", il);
  5858. Kcur = ggml_rope_ext(
  5859. ctx0, Kcur, inp_pos, nullptr,
  5860. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5861. ext_factor, attn_factor, beta_fast, beta_slow
  5862. );
  5863. cb(Qcur, "Qcur", il);
  5864. cb(Kcur, "Kcur", il);
  5865. cb(Vcur, "Vcur", il);
  5866. cur = build_attn(inp_attn, gf,
  5867. model.layers[il].wo, model.layers[il].bo,
  5868. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5869. }
  5870. if (il == n_layer - 1) {
  5871. // skip computing output for unused tokens
  5872. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5873. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5874. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5875. }
  5876. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5877. cb(ffn_inp, "ffn_inp", il);
  5878. // MoE branch
  5879. cur = build_norm(ffn_inp,
  5880. model.layers[il].ffn_norm, NULL,
  5881. LLM_NORM_RMS, il);
  5882. cb(cur, "ffn_norm", il);
  5883. ggml_tensor * moe_out =
  5884. build_moe_ffn(cur,
  5885. model.layers[il].ffn_gate_inp,
  5886. model.layers[il].ffn_up_exps,
  5887. model.layers[il].ffn_gate_exps,
  5888. model.layers[il].ffn_down_exps,
  5889. nullptr,
  5890. n_expert, n_expert_used,
  5891. LLM_FFN_SILU, true,
  5892. false, 0.0,
  5893. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  5894. il);
  5895. cb(moe_out, "ffn_moe_out", il);
  5896. cur = moe_out;
  5897. cur = ggml_add(ctx0, cur, ffn_inp);
  5898. cur = build_cvec(cur, il);
  5899. cb(cur, "l_out", il);
  5900. // input for next layer
  5901. inpL = cur;
  5902. }
  5903. cur = inpL;
  5904. cur = build_norm(cur,
  5905. model.output_norm, NULL,
  5906. LLM_NORM_RMS, -1);
  5907. cb(cur, "result_norm", -1);
  5908. res->t_embd = cur;
  5909. // lm_head
  5910. cur = build_lora_mm(model.output, cur);
  5911. cb(cur, "result_output", -1);
  5912. res->t_logits = cur;
  5913. ggml_build_forward_expand(gf, cur);
  5914. }
  5915. };
  5916. struct llm_build_phi2 : public llm_graph_context {
  5917. llm_build_phi2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5918. const int64_t n_embd_head = hparams.n_embd_head_v;
  5919. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5920. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5921. ggml_tensor * cur;
  5922. ggml_tensor * attn_norm_output;
  5923. ggml_tensor * ffn_output;
  5924. ggml_tensor * inpL;
  5925. inpL = build_inp_embd(model.tok_embd);
  5926. // inp_pos - contains the positions
  5927. ggml_tensor * inp_pos = build_inp_pos();
  5928. auto * inp_attn = build_attn_inp_kv_unified();
  5929. for (int il = 0; il < n_layer; ++il) {
  5930. attn_norm_output = build_norm(inpL,
  5931. model.layers[il].attn_norm,
  5932. model.layers[il].attn_norm_b,
  5933. LLM_NORM, il);
  5934. cb(attn_norm_output, "attn_norm", il);
  5935. // self-attention
  5936. {
  5937. ggml_tensor * Qcur = nullptr;
  5938. ggml_tensor * Kcur = nullptr;
  5939. ggml_tensor * Vcur = nullptr;
  5940. if (model.layers[il].wqkv) {
  5941. cur = build_lora_mm(model.layers[il].wqkv, attn_norm_output);
  5942. cb(cur, "wqkv", il);
  5943. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5944. cb(cur, "bqkv", il);
  5945. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5946. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5947. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5948. } else {
  5949. Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq);
  5950. Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk);
  5951. Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv);
  5952. }
  5953. cb(Qcur, "Qcur", il);
  5954. cb(Kcur, "Kcur", il);
  5955. cb(Vcur, "Vcur", il);
  5956. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5957. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5958. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5959. Qcur = ggml_rope_ext(
  5960. ctx0, Qcur, inp_pos, nullptr,
  5961. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5962. ext_factor, attn_factor, beta_fast, beta_slow
  5963. );
  5964. Kcur = ggml_rope_ext(
  5965. ctx0, Kcur, inp_pos, nullptr,
  5966. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5967. ext_factor, attn_factor, beta_fast, beta_slow
  5968. );
  5969. cb(Qcur, "Qcur", il);
  5970. cb(Kcur, "Kcur", il);
  5971. cb(Vcur, "Vcur", il);
  5972. // with phi2, we scale the Q to avoid precision issues
  5973. // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
  5974. Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
  5975. cur = build_attn(inp_attn, gf,
  5976. model.layers[il].wo, model.layers[il].bo,
  5977. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
  5978. }
  5979. if (il == n_layer - 1) {
  5980. // skip computing output for unused tokens
  5981. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5982. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5983. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  5984. attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids);
  5985. }
  5986. // FF
  5987. {
  5988. ffn_output = build_ffn(attn_norm_output,
  5989. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  5990. NULL, NULL, NULL,
  5991. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  5992. NULL,
  5993. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  5994. cb(ffn_output, "ffn_out", il);
  5995. }
  5996. cur = ggml_add(ctx0, cur, ffn_output);
  5997. cur = ggml_add(ctx0, cur, inpL);
  5998. cur = build_cvec(cur, il);
  5999. cb(cur, "l_out", il);
  6000. // input for next layer
  6001. inpL = cur;
  6002. }
  6003. cur = build_norm(inpL,
  6004. model.output_norm,
  6005. model.output_norm_b,
  6006. LLM_NORM, -1);
  6007. cb(cur, "result_norm", -1);
  6008. res->t_embd = cur;
  6009. cur = build_lora_mm(model.output, cur);
  6010. cb(cur, "result_output_no_bias", -1);
  6011. cur = ggml_add(ctx0, cur, model.output_b);
  6012. cb(cur, "result_output", -1);
  6013. res->t_logits = cur;
  6014. ggml_build_forward_expand(gf, cur);
  6015. }
  6016. };
  6017. template<bool iswa>
  6018. struct llm_build_phi3 : public llm_graph_context {
  6019. llm_build_phi3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6020. const int64_t n_embd_head = hparams.n_embd_head_v;
  6021. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  6022. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6023. ggml_tensor * cur;
  6024. ggml_tensor * inpL;
  6025. inpL = build_inp_embd(model.tok_embd);
  6026. // inp_pos - contains the positions
  6027. ggml_tensor * inp_pos = build_inp_pos();
  6028. using inp_attn_type = std::conditional_t<iswa, llm_graph_input_attn_kv_unified_iswa, llm_graph_input_attn_kv_unified>;
  6029. inp_attn_type * inp_attn = nullptr;
  6030. if constexpr (iswa) {
  6031. inp_attn = build_attn_inp_kv_unified_iswa();
  6032. } else {
  6033. inp_attn = build_attn_inp_kv_unified();
  6034. }
  6035. for (int il = 0; il < n_layer; ++il) {
  6036. auto * residual = inpL;
  6037. // self-attention
  6038. {
  6039. // rope freq factors for 128k context
  6040. ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
  6041. ggml_tensor* attn_norm_output = build_norm(inpL,
  6042. model.layers[il].attn_norm,
  6043. model.layers[il].attn_norm_b,
  6044. LLM_NORM_RMS, il);
  6045. cb(attn_norm_output, "attn_norm", il);
  6046. ggml_tensor * Qcur = nullptr;
  6047. ggml_tensor * Kcur = nullptr;
  6048. ggml_tensor * Vcur = nullptr;
  6049. if (model.layers[il].wqkv) {
  6050. cur = build_lora_mm(model.layers[il].wqkv, attn_norm_output);
  6051. cb(cur, "wqkv", il);
  6052. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd)));
  6053. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd)));
  6054. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa)));
  6055. } else {
  6056. Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq);
  6057. Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk);
  6058. Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv);
  6059. }
  6060. cb(Qcur, "Qcur", il);
  6061. cb(Kcur, "Kcur", il);
  6062. cb(Vcur, "Vcur", il);
  6063. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6064. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6065. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6066. Qcur = ggml_rope_ext(
  6067. ctx0, Qcur, inp_pos, rope_factors,
  6068. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6069. ext_factor, attn_factor, beta_fast, beta_slow
  6070. );
  6071. Kcur = ggml_rope_ext(
  6072. ctx0, Kcur, inp_pos, rope_factors,
  6073. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6074. ext_factor, attn_factor, beta_fast, beta_slow
  6075. );
  6076. cb(Qcur, "Qcur", il);
  6077. cb(Kcur, "Kcur", il);
  6078. cb(Vcur, "Vcur", il);
  6079. Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head)));
  6080. cb(Qcur, "Qcur", il);
  6081. cur = build_attn(inp_attn, gf,
  6082. model.layers[il].wo, model.layers[il].bo,
  6083. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
  6084. }
  6085. if (il == n_layer - 1) {
  6086. // skip computing output for unused tokens
  6087. ggml_tensor* inp_out_ids = build_inp_out_ids();
  6088. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6089. residual = ggml_get_rows(ctx0, residual, inp_out_ids);
  6090. }
  6091. cur = ggml_add(ctx0, cur, residual);
  6092. residual = cur;
  6093. cur = build_norm(cur,
  6094. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  6095. LLM_NORM_RMS, il);
  6096. cb(cur, "ffn_norm", il);
  6097. // feed-forward network
  6098. if (model.layers[il].ffn_gate_inp == nullptr) {
  6099. cur = build_ffn(cur,
  6100. model.layers[il].ffn_up, NULL, NULL,
  6101. NULL, NULL, NULL,
  6102. model.layers[il].ffn_down, NULL, NULL,
  6103. NULL,
  6104. LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
  6105. cb(cur, "ffn_out", il);
  6106. } else {
  6107. // MoE branch
  6108. cur = build_moe_ffn(cur,
  6109. model.layers[il].ffn_gate_inp,
  6110. model.layers[il].ffn_up_exps,
  6111. model.layers[il].ffn_gate_exps,
  6112. model.layers[il].ffn_down_exps,
  6113. nullptr,
  6114. n_expert, n_expert_used,
  6115. LLM_FFN_SILU, true,
  6116. false, 0.0,
  6117. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  6118. il);
  6119. cb(cur, "ffn_moe_out", il);
  6120. }
  6121. cur = ggml_add(ctx0, residual, cur);
  6122. cur = build_cvec(cur, il);
  6123. cb(cur, "l_out", il);
  6124. // input for next layer
  6125. inpL = cur;
  6126. }
  6127. cur = build_norm(inpL,
  6128. model.output_norm,
  6129. model.output_norm_b,
  6130. LLM_NORM_RMS, -1);
  6131. cb(cur, "result_norm", -1);
  6132. res->t_embd = cur;
  6133. cur = build_lora_mm(model.output, cur);
  6134. if (model.output_b != nullptr) {
  6135. cb(cur, "result_output_no_bias", -1);
  6136. cur = ggml_add(ctx0, cur, model.output_b);
  6137. }
  6138. cb(cur, "result_output", -1);
  6139. res->t_logits = cur;
  6140. ggml_build_forward_expand(gf, cur);
  6141. }
  6142. };
  6143. struct llm_build_plamo : public llm_graph_context {
  6144. llm_build_plamo(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6145. const int64_t n_embd_head = hparams.n_embd_head_v;
  6146. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6147. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6148. ggml_tensor * cur;
  6149. ggml_tensor * inpL;
  6150. inpL = build_inp_embd(model.tok_embd);
  6151. // inp_pos - contains the positions
  6152. ggml_tensor * inp_pos = build_inp_pos();
  6153. auto * inp_attn = build_attn_inp_kv_unified();
  6154. for (int il = 0; il < n_layer; ++il) {
  6155. // norm
  6156. cur = build_norm(inpL,
  6157. model.layers[il].attn_norm, NULL,
  6158. LLM_NORM_RMS, il);
  6159. cb(cur, "attn_norm", il);
  6160. ggml_tensor * attention_norm = cur;
  6161. // self-attention
  6162. {
  6163. // compute Q and K and RoPE them
  6164. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6165. cb(Qcur, "Qcur", il);
  6166. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6167. cb(Kcur, "Kcur", il);
  6168. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6169. cb(Vcur, "Vcur", il);
  6170. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6171. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6172. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6173. Qcur = ggml_rope_ext(
  6174. ctx0, Qcur, inp_pos, nullptr,
  6175. n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
  6176. ext_factor, attn_factor, beta_fast, beta_slow
  6177. );
  6178. Kcur = ggml_rope_ext(
  6179. ctx0, Kcur, inp_pos, nullptr,
  6180. n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
  6181. ext_factor, attn_factor, beta_fast, beta_slow
  6182. );
  6183. cb(Qcur, "Qcur", il);
  6184. cb(Kcur, "Kcur", il);
  6185. cb(Vcur, "Vcur", il);
  6186. cur = build_attn(inp_attn, gf,
  6187. model.layers[il].wo, NULL,
  6188. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6189. }
  6190. ggml_tensor * sa_out = cur;
  6191. cur = attention_norm;
  6192. if (il == n_layer - 1) {
  6193. // skip computing output for unused tokens
  6194. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6195. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6196. sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids);
  6197. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6198. }
  6199. // feed-forward network
  6200. {
  6201. cur = build_ffn(cur,
  6202. model.layers[il].ffn_up, NULL, NULL,
  6203. model.layers[il].ffn_gate, NULL, NULL,
  6204. model.layers[il].ffn_down, NULL, NULL,
  6205. NULL,
  6206. LLM_FFN_SILU, LLM_FFN_PAR, il);
  6207. cb(cur, "ffn_out", il);
  6208. }
  6209. cur = ggml_add(ctx0, cur, sa_out);
  6210. cur = ggml_add(ctx0, cur, inpL);
  6211. cur = build_cvec(cur, il);
  6212. cb(cur, "l_out", il);
  6213. // input for next layer
  6214. inpL = cur;
  6215. }
  6216. cur = inpL;
  6217. cur = build_norm(cur,
  6218. model.output_norm, NULL,
  6219. LLM_NORM_RMS, -1);
  6220. cb(cur, "result_norm", -1);
  6221. res->t_embd = cur;
  6222. // lm_head
  6223. cur = build_lora_mm(model.output, cur);
  6224. cb(cur, "result_output", -1);
  6225. res->t_logits = cur;
  6226. ggml_build_forward_expand(gf, cur);
  6227. }
  6228. };
  6229. struct llm_build_gpt2 : public llm_graph_context {
  6230. llm_build_gpt2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6231. const int64_t n_embd_head = hparams.n_embd_head_v;
  6232. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  6233. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6234. ggml_tensor * cur;
  6235. ggml_tensor * pos;
  6236. ggml_tensor * inpL;
  6237. inpL = build_inp_embd(model.tok_embd);
  6238. // inp_pos - contains the positions
  6239. ggml_tensor * inp_pos = build_inp_pos();
  6240. auto * inp_attn = build_attn_inp_kv_unified();
  6241. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  6242. cb(pos, "pos_embd", -1);
  6243. inpL = ggml_add(ctx0, inpL, pos);
  6244. cb(inpL, "inpL", -1);
  6245. for (int il = 0; il < n_layer; ++il) {
  6246. cur = build_norm(inpL,
  6247. model.layers[il].attn_norm,
  6248. model.layers[il].attn_norm_b,
  6249. LLM_NORM, il);
  6250. cb(cur, "attn_norm", il);
  6251. // self-attention
  6252. {
  6253. cur = build_lora_mm(model.layers[il].wqkv, cur);
  6254. cb(cur, "wqkv", il);
  6255. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  6256. cb(cur, "bqkv", il);
  6257. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  6258. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  6259. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  6260. cb(Qcur, "Qcur", il);
  6261. cb(Kcur, "Kcur", il);
  6262. cb(Vcur, "Vcur", il);
  6263. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6264. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6265. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6266. cur = build_attn(inp_attn, gf,
  6267. model.layers[il].wo, model.layers[il].bo,
  6268. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6269. }
  6270. if (il == n_layer - 1) {
  6271. // skip computing output for unused tokens
  6272. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6273. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6274. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6275. }
  6276. // add the input
  6277. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  6278. cb(ffn_inp, "ffn_inp", il);
  6279. // FF
  6280. {
  6281. cur = build_norm(ffn_inp,
  6282. model.layers[il].ffn_norm,
  6283. model.layers[il].ffn_norm_b,
  6284. LLM_NORM, il);
  6285. cb(cur, "ffn_norm", il);
  6286. cur = build_ffn(cur,
  6287. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  6288. NULL, NULL, NULL,
  6289. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  6290. NULL,
  6291. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  6292. cb(cur, "ffn_out", il);
  6293. }
  6294. cur = ggml_add(ctx0, cur, ffn_inp);
  6295. cur = build_cvec(cur, il);
  6296. cb(cur, "l_out", il);
  6297. // input for next layer
  6298. inpL = cur;
  6299. }
  6300. cur = build_norm(inpL,
  6301. model.output_norm,
  6302. model.output_norm_b,
  6303. LLM_NORM, -1);
  6304. cb(cur, "result_norm", -1);
  6305. res->t_embd = cur;
  6306. cur = build_lora_mm(model.output, cur);
  6307. cb(cur, "result_output", -1);
  6308. res->t_logits = cur;
  6309. ggml_build_forward_expand(gf, cur);
  6310. }
  6311. };
  6312. struct llm_build_codeshell : public llm_graph_context {
  6313. llm_build_codeshell(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6314. const int64_t n_embd_head = hparams.n_embd_head_v;
  6315. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  6316. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6317. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6318. ggml_tensor * cur;
  6319. ggml_tensor * inpL;
  6320. inpL = build_inp_embd(model.tok_embd);
  6321. // inp_pos - contains the positions
  6322. ggml_tensor * inp_pos = build_inp_pos();
  6323. auto * inp_attn = build_attn_inp_kv_unified();
  6324. for (int il = 0; il < n_layer; ++il) {
  6325. cur = build_norm(inpL,
  6326. model.layers[il].attn_norm,
  6327. model.layers[il].attn_norm_b,
  6328. LLM_NORM, il);
  6329. cb(cur, "attn_norm", il);
  6330. // self-attention
  6331. {
  6332. cur = build_lora_mm(model.layers[il].wqkv, cur);
  6333. cb(cur, "wqkv", il);
  6334. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  6335. cb(cur, "bqkv", il);
  6336. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  6337. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  6338. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  6339. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6340. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6341. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6342. Qcur = ggml_rope_ext(
  6343. ctx0, Qcur, inp_pos, nullptr,
  6344. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6345. ext_factor, attn_factor, beta_fast, beta_slow
  6346. );
  6347. Kcur = ggml_rope_ext(
  6348. ctx0, Kcur, inp_pos, nullptr,
  6349. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6350. ext_factor, attn_factor, beta_fast, beta_slow
  6351. );
  6352. cb(Qcur, "Qcur", il);
  6353. cb(Kcur, "Kcur", il);
  6354. cb(Vcur, "Vcur", il);
  6355. cur = build_attn(inp_attn, gf,
  6356. model.layers[il].wo, model.layers[il].bo,
  6357. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6358. }
  6359. if (il == n_layer - 1) {
  6360. // skip computing output for unused tokens
  6361. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6362. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6363. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6364. }
  6365. // add the input
  6366. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  6367. cb(ffn_inp, "ffn_inp", il);
  6368. // FF
  6369. {
  6370. cur = build_norm(ffn_inp,
  6371. model.layers[il].ffn_norm,
  6372. model.layers[il].ffn_norm_b,
  6373. LLM_NORM, il);
  6374. cb(cur, "ffn_norm", il);
  6375. cur = build_ffn(cur,
  6376. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  6377. NULL, NULL, NULL,
  6378. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  6379. NULL,
  6380. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  6381. cb(cur, "ffn_out", il);
  6382. }
  6383. cur = ggml_add(ctx0, cur, ffn_inp);
  6384. cur = build_cvec(cur, il);
  6385. cb(cur, "l_out", il);
  6386. // input for next layer
  6387. inpL = cur;
  6388. }
  6389. cur = build_norm(inpL,
  6390. model.output_norm,
  6391. model.output_norm_b,
  6392. LLM_NORM, -1);
  6393. cb(cur, "result_norm", -1);
  6394. res->t_embd = cur;
  6395. cur = build_lora_mm(model.output, cur);
  6396. cb(cur, "result_output", -1);
  6397. res->t_logits = cur;
  6398. ggml_build_forward_expand(gf, cur);
  6399. }
  6400. };
  6401. struct llm_build_orion : public llm_graph_context {
  6402. llm_build_orion(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6403. const int64_t n_embd_head = hparams.n_embd_head_v;
  6404. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6405. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6406. ggml_tensor * cur;
  6407. ggml_tensor * inpL;
  6408. inpL = build_inp_embd(model.tok_embd);
  6409. // inp_pos - contains the positions
  6410. ggml_tensor * inp_pos = build_inp_pos();
  6411. auto * inp_attn = build_attn_inp_kv_unified();
  6412. for (int il = 0; il < n_layer; ++il) {
  6413. ggml_tensor * inpSA = inpL;
  6414. // norm
  6415. cur = build_norm(inpL,
  6416. model.layers[il].attn_norm, model.layers[il].attn_norm_b,
  6417. LLM_NORM, il);
  6418. cb(cur, "attn_norm", il);
  6419. // self-attention
  6420. {
  6421. // compute Q and K and RoPE them
  6422. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6423. cb(Qcur, "Qcur", il);
  6424. // if (model.layers[il].bq) {
  6425. // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  6426. // cb(Qcur, "Qcur", il);
  6427. // }
  6428. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6429. cb(Kcur, "Kcur", il);
  6430. // if (model.layers[il].bk) {
  6431. // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  6432. // cb(Kcur, "Kcur", il);
  6433. // }
  6434. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6435. cb(Vcur, "Vcur", il);
  6436. // if (model.layers[il].bv) {
  6437. // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  6438. // cb(Vcur, "Vcur", il);
  6439. // }
  6440. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6441. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6442. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6443. Qcur = ggml_rope_ext(
  6444. ctx0, Qcur, inp_pos, nullptr,
  6445. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6446. ext_factor, attn_factor, beta_fast, beta_slow
  6447. );
  6448. Kcur = ggml_rope_ext(
  6449. ctx0, Kcur, inp_pos, nullptr,
  6450. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6451. ext_factor, attn_factor, beta_fast, beta_slow
  6452. );
  6453. cb(Qcur, "Qcur", il);
  6454. cb(Kcur, "Kcur", il);
  6455. cb(Vcur, "Vcur", il);
  6456. cur = build_attn(inp_attn, gf,
  6457. model.layers[il].wo, NULL,
  6458. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6459. }
  6460. if (il == n_layer - 1) {
  6461. // skip computing output for unused tokens
  6462. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6463. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6464. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6465. }
  6466. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6467. cb(ffn_inp, "ffn_inp", il);
  6468. // feed-forward network
  6469. cur = build_norm(ffn_inp,
  6470. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  6471. LLM_NORM, il);
  6472. cb(cur, "ffn_norm", il);
  6473. cur = build_ffn(cur,
  6474. model.layers[il].ffn_up, NULL, NULL,
  6475. model.layers[il].ffn_gate, NULL, NULL,
  6476. model.layers[il].ffn_down, NULL, NULL,
  6477. NULL,
  6478. LLM_FFN_SILU, LLM_FFN_PAR, il);
  6479. cb(cur, "ffn_out", il);
  6480. cur = ggml_add(ctx0, cur, ffn_inp);
  6481. cur = build_cvec(cur, il);
  6482. cb(cur, "l_out", il);
  6483. // input for next layer
  6484. inpL = cur;
  6485. }
  6486. cur = inpL;
  6487. cur = build_norm(cur,
  6488. model.output_norm, model.output_norm_b,
  6489. LLM_NORM, -1);
  6490. cb(cur, "result_norm", -1);
  6491. res->t_embd = cur;
  6492. // lm_head
  6493. cur = build_lora_mm(model.output, cur);
  6494. cb(cur, "result_output", -1);
  6495. res->t_logits = cur;
  6496. ggml_build_forward_expand(gf, cur);
  6497. }
  6498. };
  6499. struct llm_build_internlm2 : public llm_graph_context {
  6500. llm_build_internlm2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6501. const int64_t n_embd_head = hparams.n_embd_head_v;
  6502. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6503. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6504. ggml_tensor * cur;
  6505. ggml_tensor * inpL;
  6506. inpL = build_inp_embd(model.tok_embd);
  6507. // inp_pos - contains the positions
  6508. ggml_tensor * inp_pos = build_inp_pos();
  6509. auto * inp_attn = build_attn_inp_kv_unified();
  6510. for (int il = 0; il < n_layer; ++il) {
  6511. ggml_tensor * inpSA = inpL;
  6512. // norm
  6513. cur = build_norm(inpL,
  6514. model.layers[il].attn_norm, NULL,
  6515. LLM_NORM_RMS, il);
  6516. cb(cur, "attn_norm", il);
  6517. // self-attention
  6518. {
  6519. // compute Q and K and RoPE them
  6520. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6521. cb(Qcur, "Qcur", il);
  6522. if (model.layers[il].bq) {
  6523. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  6524. cb(Qcur, "Qcur", il);
  6525. }
  6526. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6527. cb(Kcur, "Kcur", il);
  6528. if (model.layers[il].bk) {
  6529. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  6530. cb(Kcur, "Kcur", il);
  6531. }
  6532. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6533. cb(Vcur, "Vcur", il);
  6534. if (model.layers[il].bv) {
  6535. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  6536. cb(Vcur, "Vcur", il);
  6537. }
  6538. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6539. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6540. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6541. Qcur = ggml_rope_ext(
  6542. ctx0, Qcur, inp_pos, nullptr,
  6543. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6544. ext_factor, attn_factor, beta_fast, beta_slow
  6545. );
  6546. Kcur = ggml_rope_ext(
  6547. ctx0, Kcur, inp_pos, nullptr,
  6548. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6549. ext_factor, attn_factor, beta_fast, beta_slow
  6550. );
  6551. cb(Qcur, "Qcur", il);
  6552. cb(Kcur, "Kcur", il);
  6553. cb(Vcur, "Vcur", il);
  6554. cur = build_attn(inp_attn, gf,
  6555. model.layers[il].wo, model.layers[il].bo,
  6556. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6557. }
  6558. if (il == n_layer - 1) {
  6559. // skip computing output for unused tokens
  6560. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6561. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6562. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6563. }
  6564. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6565. cb(ffn_inp, "ffn_inp", il);
  6566. // feed-forward network
  6567. cur = build_norm(ffn_inp,
  6568. model.layers[il].ffn_norm, NULL,
  6569. LLM_NORM_RMS, il);
  6570. cb(cur, "ffn_norm", il);
  6571. cur = build_ffn(cur,
  6572. model.layers[il].ffn_up, NULL, NULL,
  6573. model.layers[il].ffn_gate, NULL, NULL,
  6574. model.layers[il].ffn_down, NULL, NULL,
  6575. NULL,
  6576. LLM_FFN_SILU, LLM_FFN_PAR, il);
  6577. cb(cur, "ffn_out", il);
  6578. cur = ggml_add(ctx0, cur, ffn_inp);
  6579. cur = build_cvec(cur, il);
  6580. cb(cur, "l_out", il);
  6581. // input for next layer
  6582. inpL = cur;
  6583. }
  6584. cur = inpL;
  6585. cur = build_norm(cur,
  6586. model.output_norm, NULL,
  6587. LLM_NORM_RMS, -1);
  6588. cb(cur, "result_norm", -1);
  6589. res->t_embd = cur;
  6590. // lm_head
  6591. cur = build_lora_mm(model.output, cur);
  6592. cb(cur, "result_output", -1);
  6593. res->t_logits = cur;
  6594. ggml_build_forward_expand(gf, cur);
  6595. }
  6596. };
  6597. struct llm_build_minicpm3 : public llm_graph_context {
  6598. llm_build_minicpm3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6599. //TODO: if the model varies, these parameters need to be read from the model
  6600. const int64_t n_embd_base = 256;
  6601. const float scale_embd = 12.0f;
  6602. const float scale_depth = 1.4f;
  6603. const float kq_scale = 1.0f / sqrtf(float(hparams.n_embd_head_k));
  6604. const uint32_t n_embd_head_qk_rope = hparams.n_rot;
  6605. const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  6606. const uint32_t kv_lora_rank = hparams.n_lora_kv;
  6607. ggml_tensor * cur;
  6608. ggml_tensor * inpL;
  6609. inpL = build_inp_embd(model.tok_embd);
  6610. // scale the input embeddings
  6611. inpL = ggml_scale(ctx0, inpL, scale_embd);
  6612. cb(inpL, "inp_scaled", -1);
  6613. // inp_pos - contains the positions
  6614. ggml_tensor * inp_pos = build_inp_pos();
  6615. auto * inp_attn = build_attn_inp_kv_unified();
  6616. for (int il = 0; il < n_layer; ++il) {
  6617. ggml_tensor * inpSA = inpL;
  6618. ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
  6619. // norm
  6620. cur = build_norm(inpL,
  6621. model.layers[il].attn_norm, NULL,
  6622. LLM_NORM_RMS, il);
  6623. cb(cur, "attn_norm", il);
  6624. // self_attention
  6625. {
  6626. ggml_tensor * q = NULL;
  6627. // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}
  6628. q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
  6629. cb(q, "q", il);
  6630. q = build_norm(q,
  6631. model.layers[il].attn_q_a_norm, NULL,
  6632. LLM_NORM_RMS, il);
  6633. cb(q, "q", il);
  6634. // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}
  6635. q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
  6636. cb(q, "q", il);
  6637. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  6638. ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
  6639. ggml_row_size(q->type, hparams.n_embd_head_k),
  6640. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  6641. 0);
  6642. cb(q_nope, "q_nope", il);
  6643. // and {n_head * n_embd_head_qk_rope, n_tokens}
  6644. ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
  6645. ggml_row_size(q->type, hparams.n_embd_head_k),
  6646. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  6647. ggml_row_size(q->type, n_embd_head_qk_nope));
  6648. cb(q_pe, "q_pe", il);
  6649. // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
  6650. ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
  6651. cb(kv_pe_compresseed, "kv_pe_compresseed", il);
  6652. // split into {kv_lora_rank, n_tokens}
  6653. ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens,
  6654. kv_pe_compresseed->nb[1],
  6655. 0);
  6656. cb(kv_compressed, "kv_compressed", il);
  6657. // and {n_embd_head_qk_rope, n_tokens}
  6658. ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens,
  6659. kv_pe_compresseed->nb[1],
  6660. kv_pe_compresseed->nb[1],
  6661. ggml_row_size(kv_pe_compresseed->type, kv_lora_rank));
  6662. cb(k_pe, "k_pe", il);
  6663. // TODO: the CUDA backend used to not support non-cont. (RMS) norm, investigate removing ggml_cont
  6664. kv_compressed = ggml_cont(ctx0, kv_compressed);
  6665. kv_compressed = build_norm(kv_compressed,
  6666. model.layers[il].attn_kv_a_norm, NULL,
  6667. LLM_NORM_RMS, il);
  6668. cb(kv_compressed, "kv_compressed", il);
  6669. // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
  6670. ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
  6671. cb(kv, "kv", il);
  6672. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  6673. ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
  6674. ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
  6675. ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  6676. 0);
  6677. cb(k_nope, "k_nope", il);
  6678. // and {n_head * n_embd_head_v, n_tokens}
  6679. ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
  6680. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  6681. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
  6682. ggml_row_size(kv->type, (n_embd_head_qk_nope)));
  6683. cb(v_states, "v_states", il);
  6684. v_states = ggml_cont(ctx0, v_states);
  6685. cb(v_states, "v_states", il);
  6686. v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
  6687. ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
  6688. 0);
  6689. cb(v_states, "v_states", il);
  6690. q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
  6691. q_pe = ggml_rope_ext(
  6692. ctx0, q_pe, inp_pos, rope_factors,
  6693. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6694. ext_factor, attn_factor, beta_fast, beta_slow
  6695. );
  6696. cb(q_pe, "q_pe", il);
  6697. // shared RoPE key
  6698. k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
  6699. k_pe = ggml_rope_ext(
  6700. ctx0, k_pe, inp_pos, rope_factors,
  6701. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6702. ext_factor, attn_factor, beta_fast, beta_slow
  6703. );
  6704. cb(k_pe, "k_pe", il);
  6705. ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
  6706. cb(q_states, "q_states", il);
  6707. ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
  6708. cb(k_states, "k_states", il);
  6709. cur = build_attn(inp_attn, gf,
  6710. model.layers[il].wo, NULL,
  6711. q_states, k_states, v_states, nullptr, nullptr, kq_scale, il);
  6712. }
  6713. if (il == n_layer - 1) {
  6714. // skip computing output for unused tokens
  6715. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6716. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6717. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6718. }
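// note: both the attention output (here) and the FFN output (below) are damped by
// scale_res = scale_depth / sqrt(n_layer) before the residual add, so the residual
// stream does not grow with depth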
  6719. // scale_res - scale the hidden states for residual connection
  6720. const float scale_res = scale_depth/sqrtf(float(n_layer));
  6721. cur = ggml_scale(ctx0, cur, scale_res);
  6722. cb(cur, "hidden_scaled", il);
  6723. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6724. cb(ffn_inp, "ffn_inp", il);
  6725. // feed-forward network
  6726. {
  6727. cur = build_norm(ffn_inp,
  6728. model.layers[il].ffn_norm, NULL,
  6729. LLM_NORM_RMS, il);
  6730. cb(cur, "ffn_norm", il);
  6731. cur = build_ffn(cur,
  6732. model.layers[il].ffn_up, NULL, NULL,
  6733. model.layers[il].ffn_gate, NULL, NULL,
  6734. model.layers[il].ffn_down, NULL, NULL,
  6735. NULL,
  6736. LLM_FFN_SILU, LLM_FFN_PAR, il);
  6737. cb(cur, "ffn_out", il);
  6738. }
  6739. // scale the hidden states for residual connection
  6740. cur = ggml_scale(ctx0, cur, scale_res);
  6741. cb(cur, "hidden_scaled_ffn", il);
  6742. cur = ggml_add(ctx0, cur, ffn_inp);
  6743. cur = build_cvec(cur, il);
  6744. cb(cur, "l_out", il);
  6745. // input for next layer
  6746. inpL = cur;
  6747. }
  6748. cur = inpL;
  6749. cur = build_norm(cur,
  6750. model.output_norm, NULL,
  6751. LLM_NORM_RMS, -1);
  6752. cb(cur, "result_norm", -1);
  6753. res->t_embd = cur;
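// note: the hidden state is shrunk by n_embd_base / n_embd before the output projection,
// a width-dependent logit correction (the wider the model relative to n_embd_base, the
// stronger the shrink)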
  6754. // lm_head scaling
  6755. const float scale_lmhead = float(n_embd_base)/float(n_embd);
  6756. cur = ggml_scale(ctx0, cur, scale_lmhead);
  6757. cb(cur, "lmhead_scaling", -1);
  6758. // lm_head
  6759. cur = build_lora_mm(model.output, cur);
  6760. cb(cur, "result_output", -1);
  6761. res->t_logits = cur;
  6762. ggml_build_forward_expand(gf, cur);
  6763. }
  6764. };
  6765. struct llm_build_gemma : public llm_graph_context {
  6766. llm_build_gemma(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6767. const int64_t n_embd_head = hparams.n_embd_head_v;
  6768. ggml_tensor * cur;
  6769. ggml_tensor * inpL;
  6770. inpL = build_inp_embd(model.tok_embd);
  6771. inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
  6772. cb(inpL, "inp_scaled", -1);
  6773. // inp_pos - contains the positions
  6774. ggml_tensor * inp_pos = build_inp_pos();
  6775. auto * inp_attn = build_attn_inp_kv_unified();
  6776. for (int il = 0; il < n_layer; ++il) {
  6777. // norm
  6778. cur = build_norm(inpL,
  6779. model.layers[il].attn_norm, NULL,
  6780. LLM_NORM_RMS, il);
  6781. cb(cur, "attn_norm", il);
  6782. // self-attention
  6783. {
  6784. // compute Q and K and RoPE them
  6785. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6786. cb(Qcur, "Qcur", il);
  6787. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6788. cb(Kcur, "Kcur", il);
  6789. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6790. cb(Vcur, "Vcur", il);
  6791. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6792. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6793. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6794. Qcur = ggml_rope_ext(
  6795. ctx0, Qcur, inp_pos, nullptr,
  6796. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6797. ext_factor, attn_factor, beta_fast, beta_slow);
  6798. Kcur = ggml_rope_ext(
  6799. ctx0, Kcur, inp_pos, nullptr,
  6800. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6801. ext_factor, attn_factor, beta_fast, beta_slow);
  6802. cb(Qcur, "Qcur", il);
  6803. cb(Kcur, "Kcur", il);
  6804. cb(Vcur, "Vcur", il);
  6805. Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head)));
  6806. cb(Qcur, "Qcur_scaled", il);
  6807. cur = build_attn(inp_attn, gf,
  6808. model.layers[il].wo, NULL,
  6809. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
  6810. }
  6811. if (il == n_layer - 1) {
  6812. // skip computing output for unused tokens
  6813. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6814. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6815. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6816. }
  6817. ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
  6818. cb(sa_out, "sa_out", il);
  6819. cur = build_norm(sa_out,
  6820. model.layers[il].ffn_norm, NULL,
  6821. LLM_NORM_RMS, il);
  6822. cb(cur, "ffn_norm", il);
  6823. // feed-forward network
  6824. {
  6825. cur = build_ffn(cur,
  6826. model.layers[il].ffn_up, NULL, NULL,
  6827. model.layers[il].ffn_gate, NULL, NULL,
  6828. model.layers[il].ffn_down, NULL, NULL,
  6829. NULL,
  6830. LLM_FFN_GELU, LLM_FFN_PAR, il);
  6831. cb(cur, "ffn_out", il);
  6832. }
  6833. cur = ggml_add(ctx0, cur, sa_out);
  6834. cur = build_cvec(cur, il);
  6835. cb(cur, "l_out", il);
  6836. // input for next layer
  6837. inpL = cur;
  6838. }
  6839. cur = inpL;
  6840. cur = build_norm(cur,
  6841. model.output_norm, NULL,
  6842. LLM_NORM_RMS, -1);
  6843. cb(cur, "result_norm", -1);
  6844. res->t_embd = cur;
  6845. // lm_head
  6846. cur = build_lora_mm(model.output, cur);
  6847. cb(cur, "result_output", -1);
  6848. res->t_logits = cur;
  6849. ggml_build_forward_expand(gf, cur);
  6850. }
  6851. };
  6852. struct llm_build_gemma2_iswa : public llm_graph_context {
  6853. llm_build_gemma2_iswa(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6854. const int64_t n_embd_head = hparams.n_embd_head_k;
  6855. ggml_tensor * cur;
  6856. ggml_tensor * inpL;
  6857. inpL = build_inp_embd(model.tok_embd);
  6858. inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
  6859. cb(inpL, "inp_scaled", -1);
  6860. // inp_pos - contains the positions
  6861. ggml_tensor * inp_pos = build_inp_pos();
  6862. auto * inp_attn = build_attn_inp_kv_unified_iswa();
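// note: "iswa" = interleaved sliding-window attention; the unified iSWA KV cache lets
// layers alternate between local (sliding-window) and full-context attention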
  6863. for (int il = 0; il < n_layer; ++il) {
  6864. // norm
  6865. cur = build_norm(inpL,
  6866. model.layers[il].attn_norm, NULL,
  6867. LLM_NORM_RMS, il);
  6868. cb(cur, "attn_norm", il);
  6869. // self-attention
  6870. {
  6871. // compute Q and K and RoPE them
  6872. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6873. cb(Qcur, "Qcur", il);
  6874. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6875. cb(Kcur, "Kcur", il);
  6876. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6877. cb(Vcur, "Vcur", il);
  6878. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6879. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6880. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6881. Qcur = ggml_rope_ext(
  6882. ctx0, Qcur, inp_pos, nullptr,
  6883. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6884. ext_factor, attn_factor, beta_fast, beta_slow);
  6885. Kcur = ggml_rope_ext(
  6886. ctx0, Kcur, inp_pos, nullptr,
  6887. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6888. ext_factor, attn_factor, beta_fast, beta_slow);
  6889. cb(Qcur, "Qcur", il);
  6890. cb(Kcur, "Kcur", il);
  6891. cb(Vcur, "Vcur", il);
  6892. Qcur = ggml_scale(ctx0, Qcur, hparams.f_attention_scale);
  6893. cur = build_attn(inp_attn, gf,
  6894. model.layers[il].wo, NULL,
  6895. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
  6896. }
  6897. cur = build_norm(cur,
  6898. model.layers[il].attn_post_norm, NULL,
  6899. LLM_NORM_RMS, il);
  6900. cb(cur, "attn_post_norm", il);
  6901. if (il == n_layer - 1) {
  6902. // skip computing output for unused tokens
  6903. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6904. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6905. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6906. }
  6907. ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
  6908. cb(sa_out, "sa_out", il);
  6909. cur = build_norm(sa_out,
  6910. model.layers[il].ffn_norm, NULL,
  6911. LLM_NORM_RMS, il);
  6912. cb(cur, "ffn_norm", il);
  6913. // feed-forward network
  6914. {
  6915. cur = build_ffn(cur,
  6916. model.layers[il].ffn_up, NULL, NULL,
  6917. model.layers[il].ffn_gate, NULL, NULL,
  6918. model.layers[il].ffn_down, NULL, NULL,
  6919. NULL,
  6920. LLM_FFN_GELU, LLM_FFN_PAR, il);
  6921. cb(cur, "ffn_out", il);
  6922. }
  6923. cur = build_norm(cur,
  6924. model.layers[il].ffn_post_norm, NULL,
  6925. LLM_NORM_RMS, -1);
  6926. cb(cur, "ffn_post_norm", -1);
  6927. cur = ggml_add(ctx0, cur, sa_out);
  6928. cur = build_cvec(cur, il);
  6929. cb(cur, "l_out", il);
  6930. // input for next layer
  6931. inpL = cur;
  6932. }
  6933. cur = inpL;
  6934. cur = build_norm(cur,
  6935. model.output_norm, NULL,
  6936. LLM_NORM_RMS, -1);
  6937. cb(cur, "result_norm", -1);
  6938. res->t_embd = cur;
  6939. // lm_head
  6940. cur = build_lora_mm(model.output, cur);
  6941. // final logit soft-capping
  6942. cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping);
  6943. cur = ggml_tanh(ctx0, cur);
  6944. cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping);
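// i.e. logits = cap * tanh(logits / cap), smoothly bounding the final logits to (-cap, +cap)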
  6945. cb(cur, "result_output", -1);
  6946. res->t_logits = cur;
  6947. ggml_build_forward_expand(gf, cur);
  6948. }
  6949. };
  6950. struct llm_build_gemma3_iswa : public llm_graph_context {
  6951. llm_build_gemma3_iswa(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6952. const int64_t n_embd_head = hparams.n_embd_head_k;
  6953. ggml_tensor * cur;
  6954. ggml_tensor * inpL;
  6955. inpL = build_inp_embd(model.tok_embd);
6956. // important: do not apply the sqrt(n_embd) scaling to raw embedding inputs (i.e. encoded image embeddings)
  6957. if (ubatch.token) {
  6958. inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
  6959. cb(inpL, "inp_scaled", -1);
  6960. }
  6961. // inp_pos - contains the positions
  6962. ggml_tensor * inp_pos = build_inp_pos();
  6963. // TODO: is causal == true correct? might need some changes
  6964. auto * inp_attn = build_attn_inp_kv_unified_iswa();
  6965. for (int il = 0; il < n_layer; ++il) {
  6966. const float freq_base_l = model.get_rope_freq_base (cparams, il);
  6967. const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
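// note: the RoPE frequency base/scale are selected per layer, so sliding-window layers and
// full-attention layers can use different rope settings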
  6968. // norm
  6969. cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
  6970. cb(cur, "attn_norm", il);
  6971. // self-attention
  6972. {
  6973. // compute Q and K and RoPE them
  6974. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6975. cb(Qcur, "Qcur", il);
  6976. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6977. cb(Kcur, "Kcur", il);
  6978. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6979. cb(Vcur, "Vcur", il);
  6980. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6981. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6982. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6983. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
  6984. cb(Qcur, "Qcur_normed", il);
  6985. Qcur = ggml_rope_ext(
  6986. ctx0, Qcur, inp_pos, nullptr,
  6987. n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
  6988. ext_factor, attn_factor, beta_fast, beta_slow);
  6989. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
  6990. cb(Kcur, "Kcur_normed", il);
  6991. Kcur = ggml_rope_ext(
  6992. ctx0, Kcur, inp_pos, nullptr,
  6993. n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
  6994. ext_factor, attn_factor, beta_fast, beta_slow);
  6995. cb(Qcur, "Qcur", il);
  6996. cb(Kcur, "Kcur", il);
  6997. cb(Vcur, "Vcur", il);
  6998. // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/model.py#L315
  6999. Qcur = ggml_scale(ctx0, Qcur, hparams.f_attention_scale);
  7000. cur = build_attn(inp_attn, gf,
  7001. model.layers[il].wo, NULL,
  7002. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
  7003. }
  7004. cur = build_norm(cur,
  7005. model.layers[il].attn_post_norm, NULL,
  7006. LLM_NORM_RMS, il);
  7007. cb(cur, "attn_post_norm", il);
  7008. if (il == n_layer - 1) {
  7009. // skip computing output for unused tokens
  7010. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7011. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7012. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7013. }
  7014. ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
  7015. cb(sa_out, "sa_out", il);
  7016. cur = build_norm(sa_out,
  7017. model.layers[il].ffn_norm, NULL,
  7018. LLM_NORM_RMS, il);
  7019. cb(cur, "ffn_norm", il);
  7020. // feed-forward network
  7021. {
  7022. cur = build_ffn(cur,
  7023. model.layers[il].ffn_up, NULL, NULL,
  7024. model.layers[il].ffn_gate, NULL, NULL,
  7025. model.layers[il].ffn_down, NULL, NULL,
  7026. NULL,
  7027. LLM_FFN_GELU, LLM_FFN_PAR, il);
  7028. cb(cur, "ffn_out", il);
  7029. }
  7030. cur = build_norm(cur,
  7031. model.layers[il].ffn_post_norm, NULL,
  7032. LLM_NORM_RMS, -1);
  7033. cb(cur, "ffn_post_norm", -1);
  7034. cur = ggml_add(ctx0, cur, sa_out);
  7035. cur = build_cvec(cur, il);
  7036. cb(cur, "l_out", il);
  7037. // input for next layer
  7038. inpL = cur;
  7039. }
  7040. cur = inpL;
  7041. cur = build_norm(cur,
  7042. model.output_norm, NULL,
  7043. LLM_NORM_RMS, -1);
  7044. cb(cur, "result_norm", -1);
  7045. res->t_embd = cur;
  7046. // lm_head
  7047. cur = build_lora_mm(model.output, cur);
  7048. cb(cur, "result_output", -1);
  7049. res->t_logits = cur;
  7050. ggml_build_forward_expand(gf, cur);
  7051. }
  7052. };
  7053. // TODO: move up next to build_starcoder
  7054. struct llm_build_starcoder2 : public llm_graph_context {
  7055. llm_build_starcoder2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7056. const int64_t n_embd_head = hparams.n_embd_head_v;
  7057. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7058. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7059. ggml_tensor * cur;
  7060. ggml_tensor * inpL;
  7061. inpL = build_inp_embd(model.tok_embd);
  7062. // inp_pos - contains the positions
  7063. ggml_tensor * inp_pos = build_inp_pos();
  7064. auto * inp_attn = build_attn_inp_kv_unified();
  7065. for (int il = 0; il < n_layer; ++il) {
  7066. ggml_tensor * inpSA = inpL;
  7067. // norm
  7068. cur = build_norm(inpL,
  7069. model.layers[il].attn_norm, model.layers[il].attn_norm_b,
  7070. LLM_NORM, il);
  7071. cb(cur, "attn_norm", il);
  7072. // self-attention
  7073. {
  7074. // compute Q and K and RoPE them
  7075. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7076. cb(Qcur, "Qcur", il);
  7077. if (model.layers[il].bq) {
  7078. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7079. cb(Qcur, "Qcur", il);
  7080. }
  7081. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7082. cb(Kcur, "Kcur", il);
  7083. if (model.layers[il].bk) {
  7084. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7085. cb(Kcur, "Kcur", il);
  7086. }
  7087. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7088. cb(Vcur, "Vcur", il);
  7089. if (model.layers[il].bv) {
  7090. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7091. cb(Vcur, "Vcur", il);
  7092. }
  7093. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7094. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7095. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7096. Qcur = ggml_rope_ext(
  7097. ctx0, Qcur, inp_pos, nullptr,
  7098. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7099. ext_factor, attn_factor, beta_fast, beta_slow
  7100. );
  7101. Kcur = ggml_rope_ext(
  7102. ctx0, Kcur, inp_pos, nullptr,
  7103. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7104. ext_factor, attn_factor, beta_fast, beta_slow
  7105. );
  7106. cb(Qcur, "Qcur", il);
  7107. cb(Kcur, "Kcur", il);
  7108. cb(Vcur, "Vcur", il);
  7109. cur = build_attn(inp_attn, gf,
  7110. model.layers[il].wo, model.layers[il].bo,
  7111. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7112. }
  7113. if (il == n_layer - 1) {
  7114. // skip computing output for unused tokens
  7115. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7116. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7117. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7118. }
  7119. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7120. cb(ffn_inp, "ffn_inp", il);
  7121. // feed-forward network
  7122. cur = build_norm(ffn_inp,
  7123. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  7124. LLM_NORM, il);
  7125. cb(cur, "ffn_norm", il);
  7126. cur = build_ffn(cur,
  7127. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  7128. NULL, NULL, NULL,
  7129. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  7130. NULL,
  7131. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  7132. cb(cur, "ffn_out", il);
  7133. cur = ggml_add(ctx0, cur, ffn_inp);
  7134. cur = build_cvec(cur, il);
  7135. cb(cur, "l_out", il);
  7136. // input for next layer
  7137. inpL = cur;
  7138. }
  7139. cur = inpL;
  7140. cur = build_norm(cur,
  7141. model.output_norm, model.output_norm_b,
  7142. LLM_NORM, -1);
  7143. cb(cur, "result_norm", -1);
  7144. res->t_embd = cur;
  7145. // lm_head
  7146. cur = build_lora_mm(model.output, cur);
  7147. cb(cur, "result_output", -1);
  7148. res->t_logits = cur;
  7149. ggml_build_forward_expand(gf, cur);
  7150. }
  7151. };
  7152. struct llm_build_mamba : public llm_graph_context {
  7153. const llama_model & model;
  7154. llm_build_mamba(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params), model(model) {
  7155. ggml_tensor * cur;
  7156. ggml_tensor * inpL;
  7157. // {n_embd, n_tokens}
  7158. inpL = build_inp_embd(model.tok_embd);
  7159. ggml_tensor * state_copy = build_inp_s_copy();
  7160. for (int il = 0; il < n_layer; ++il) {
  7161. // norm
  7162. cur = build_norm(inpL,
  7163. model.layers[il].attn_norm, NULL,
  7164. LLM_NORM_RMS, il);
  7165. cb(cur, "attn_norm", il);
  7166. cur = build_mamba_layer(gf, cur, state_copy, ubatch, il);
  7167. if (il == n_layer - 1) {
  7168. // skip computing output for unused tokens
  7169. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7170. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7171. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7172. }
  7173. // residual
  7174. cur = ggml_add(ctx0, cur, inpL);
  7175. cur = build_cvec(cur, il);
  7176. cb(cur, "l_out", il);
  7177. // input for next layer
  7178. inpL = cur;
  7179. }
  7180. // final rmsnorm
  7181. cur = build_norm(inpL,
  7182. model.output_norm, NULL,
  7183. LLM_NORM_RMS, -1);
  7184. cb(cur, "result_norm", -1);
  7185. res->t_embd = cur;
  7186. // lm_head
  7187. cur = build_lora_mm(model.output, cur);
  7188. cb(cur, "result_output", -1);
  7189. res->t_logits = cur;
  7190. ggml_build_forward_expand(gf, cur);
  7191. }
  7192. // TODO: split
  7193. ggml_tensor * build_mamba_layer(
  7194. ggml_cgraph * gf,
  7195. ggml_tensor * cur,
  7196. ggml_tensor * state_copy,
  7197. const llama_ubatch & ubatch,
  7198. int il) const {
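// one selective state-space (Mamba) block: a short causal 1D conv over the input projection,
// followed by an SSM scan whose per-token parameters (dt, B, C) are themselves computed from x;
// the conv and ssm states live in the recurrent cache and are loaded/stored below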
  7199. const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
  7200. const auto kv_head = kv_state->get_head();
  7201. const int64_t d_conv = hparams.ssm_d_conv;
  7202. const int64_t d_inner = hparams.ssm_d_inner;
  7203. const int64_t d_state = hparams.ssm_d_state;
  7204. const int64_t dt_rank = hparams.ssm_dt_rank;
  7205. const int64_t n_seqs = ubatch.n_seqs;
7206. // Some variants of the Mamba arch (e.g. FalconMamba) apply RMS norm to the Dt, B and C projections
  7207. const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms;
7208. // Use the same RMS norm eps as the final layer norm
  7209. const float norm_rms_eps = hparams.f_norm_rms_eps;
  7210. const int64_t n_seq_tokens = ubatch.n_seq_tokens;
  7211. GGML_ASSERT(n_seqs != 0);
  7212. GGML_ASSERT(ubatch.equal_seqs);
  7213. GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
  7214. ggml_tensor * conv_states_all = kv_state->get_k_l(il);
  7215. ggml_tensor * ssm_states_all = kv_state->get_v_l(il);
  7216. // (ab)using the KV cache to store the states
  7217. ggml_tensor * conv = build_recurrent_state(
  7218. gf, conv_states_all, state_copy,
  7219. hparams.n_embd_k_s(), n_seqs);
  7220. conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs);
  7221. ggml_tensor * ssm = build_recurrent_state(
  7222. gf, ssm_states_all, state_copy,
  7223. hparams.n_embd_v_s(), n_seqs);
  7224. ssm = ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs);
  7225. // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs}
  7226. cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs);
  7227. // {n_embd, 2*d_inner} @ {n_embd, n_seq_tokens, n_seqs} => {2*d_inner, n_seq_tokens, n_seqs}
  7228. ggml_tensor * xz = build_lora_mm(model.layers[il].ssm_in, cur);
  7229. // split the above in two
  7230. // => {d_inner, n_seq_tokens, n_seqs}
  7231. ggml_tensor * x = ggml_view_3d(ctx0, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], 0);
  7232. ggml_tensor * z = ggml_view_3d(ctx0, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], d_inner*ggml_element_size(xz));
  7233. // conv
  7234. {
  7235. // => {d_conv - 1 + n_seq_tokens, d_inner, n_seqs}
  7236. ggml_tensor * conv_x = ggml_concat(ctx0, conv, ggml_transpose(ctx0, x), 0);
  7237. // copy last (d_conv - 1) columns back into the state cache
  7238. ggml_tensor * last_conv = ggml_view_3d(ctx0, conv_x, d_conv - 1, d_inner, n_seqs, conv_x->nb[1], conv_x->nb[2], n_seq_tokens*(conv_x->nb[0]));
  7239. ggml_build_forward_expand(gf,
  7240. ggml_cpy(ctx0, last_conv,
  7241. ggml_view_1d(ctx0, conv_states_all,
  7242. (d_conv - 1)*(d_inner)*(n_seqs),
  7243. kv_head*(d_conv - 1)*(d_inner)*ggml_element_size(conv_states_all))));
  7244. // 1D convolution
  7245. // The equivalent is to make a self-overlapping view of conv_x
  7246. // over d_conv columns at each stride in the 3rd dimension,
  7247. // then element-wise multiply that with the conv1d weight,
  7248. // then sum the elements of each row,
  7249. // (the last two steps are a dot product over rows (also doable with mul_mat))
  7250. // then permute away the ne[0] dimension,
  7251. // and then you're left with the resulting x tensor.
  7252. // For simultaneous sequences, all sequences need to have the same length.
  7253. x = ggml_ssm_conv(ctx0, conv_x, model.layers[il].ssm_conv1d);
  7254. // bias
  7255. x = ggml_add(ctx0, x, model.layers[il].ssm_conv1d_b);
  7256. x = ggml_silu(ctx0, x);
  7257. }
  7258. // ssm
  7259. {
  7260. // {d_inner, dt_rank + 2*d_state} @ {d_inner, n_seq_tokens, n_seqs} => {dt_rank + 2*d_state, n_seq_tokens, n_seqs}
  7261. ggml_tensor * x_db = build_lora_mm(model.layers[il].ssm_x, x);
  7262. // split
  7263. ggml_tensor * dt = ggml_view_3d(ctx0, x_db, dt_rank, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], 0);
  7264. ggml_tensor * B = ggml_view_3d(ctx0, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*dt_rank);
  7265. ggml_tensor * C = ggml_view_3d(ctx0, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*(dt_rank+d_state));
  7266. // Some Mamba variants (e.g. FalconMamba) apply RMS norm in B, C & Dt layers
  7267. if (ssm_dt_b_c_rms) {
  7268. dt = ggml_rms_norm(ctx0, dt, norm_rms_eps);
  7269. B = ggml_rms_norm(ctx0, B, norm_rms_eps);
  7270. C = ggml_rms_norm(ctx0, C, norm_rms_eps);
  7271. }
  7272. // {dt_rank, d_inner} @ {dt_rank, n_seq_tokens, n_seqs} => {d_inner, n_seq_tokens, n_seqs}
  7273. dt = build_lora_mm(model.layers[il].ssm_dt, dt);
  7274. dt = ggml_add(ctx0, dt, model.layers[il].ssm_dt_b);
  7275. // Custom operator to optimize the parallel associative scan
  7276. // as described in the Annex D of the Mamba paper.
  7277. // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs}
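// roughly, per channel and per token (with a softplus applied to dt inside the op):
//   h_t = exp(dt_t * A) * h_{t-1} + dt_t * B_t * x_t
//   y_t = dot(C_t, h_t)
// the D skip connection and the SiLU(z) gate are applied just below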
  7278. ggml_tensor * y_ssm = ggml_ssm_scan(ctx0, ssm, x, dt, model.layers[il].ssm_a, B, C);
  7279. // store last states
  7280. ggml_build_forward_expand(gf,
  7281. ggml_cpy(ctx0,
  7282. ggml_view_1d(ctx0, y_ssm, d_state*d_inner*n_seqs, x->nb[3]),
  7283. ggml_view_1d(ctx0, ssm_states_all, d_state*d_inner*n_seqs, kv_head*d_state*d_inner*ggml_element_size(ssm_states_all))));
  7284. ggml_tensor * y = ggml_view_3d(ctx0, y_ssm, d_inner, n_seq_tokens, n_seqs, x->nb[1], x->nb[2], 0);
  7285. // TODO: skip computing output earlier for unused tokens
  7286. // {d_inner, n_seq_tokens, n_seqs} * {d_inner} => {d_inner, n_seq_tokens, n_seqs}
  7287. y = ggml_add(ctx0, y, ggml_mul(ctx0, x, model.layers[il].ssm_d));
  7288. y = ggml_mul(ctx0, y, ggml_silu(ctx0, ggml_cont(ctx0, z)));
  7289. // {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs}
  7290. cur = build_lora_mm(model.layers[il].ssm_out, y);
  7291. }
  7292. // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens}
  7293. cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], n_seq_tokens * n_seqs);
  7294. //cb(cur, "mamba_out", il);
  7295. return cur;
  7296. }
  7297. };
  7298. struct llm_build_command_r : public llm_graph_context {
  7299. llm_build_command_r(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7300. const int64_t n_embd_head = hparams.n_embd_head_v;
  7301. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7302. const float f_logit_scale = hparams.f_logit_scale;
  7303. ggml_tensor * cur;
  7304. ggml_tensor * inpL;
  7305. inpL = build_inp_embd(model.tok_embd);
  7306. // inp_pos - contains the positions
  7307. ggml_tensor * inp_pos = build_inp_pos();
  7308. auto * inp_attn = build_attn_inp_kv_unified();
  7309. for (int il = 0; il < n_layer; ++il) {
  7310. // norm
  7311. cur = build_norm(inpL,
  7312. model.layers[il].attn_norm, NULL,
  7313. LLM_NORM, il);
  7314. cb(cur, "attn_norm", il);
  7315. ggml_tensor * ffn_inp = cur;
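// Command-R uses a parallel block: the attention and the FFN both read this same
// pre-normed input (ffn_inp), and their outputs are added to the residual together below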
  7316. // self-attention
  7317. {
  7318. // compute Q and K and RoPE them
  7319. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7320. cb(Qcur, "Qcur", il);
  7321. if (model.layers[il].bq) {
  7322. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7323. cb(Qcur, "Qcur", il);
  7324. }
  7325. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7326. cb(Kcur, "Kcur", il);
  7327. if (model.layers[il].bk) {
  7328. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7329. cb(Kcur, "Kcur", il);
  7330. }
  7331. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7332. cb(Vcur, "Vcur", il);
  7333. if (model.layers[il].bv) {
  7334. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7335. cb(Vcur, "Vcur", il);
  7336. }
  7337. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7338. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7339. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7340. if (model.layers[il].attn_q_norm) {
  7341. Qcur = build_norm(Qcur,
  7342. model.layers[il].attn_q_norm,
  7343. NULL,
  7344. LLM_NORM, il);
  7345. cb(Qcur, "Qcur", il);
  7346. }
  7347. Qcur = ggml_rope_ext(
  7348. ctx0, Qcur, inp_pos, nullptr,
  7349. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7350. ext_factor, attn_factor, beta_fast, beta_slow
  7351. );
  7352. if (model.layers[il].attn_k_norm) {
  7353. Kcur = build_norm(Kcur,
  7354. model.layers[il].attn_k_norm,
  7355. NULL,
  7356. LLM_NORM, il);
  7357. cb(Kcur, "Kcur", il);
  7358. }
  7359. Kcur = ggml_rope_ext(
  7360. ctx0, Kcur, inp_pos, nullptr,
  7361. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7362. ext_factor, attn_factor, beta_fast, beta_slow
  7363. );
  7364. cb(Qcur, "Qcur", il);
  7365. cb(Kcur, "Kcur", il);
  7366. cb(Vcur, "Vcur", il);
  7367. cur = build_attn(inp_attn, gf,
  7368. model.layers[il].wo, model.layers[il].bo,
  7369. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7370. }
  7371. if (il == n_layer - 1) {
  7372. // skip computing output for unused tokens
  7373. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7374. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7375. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7376. ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
  7377. }
  7378. ggml_tensor * attn_out = cur;
  7379. // feed-forward network
  7380. {
  7381. cur = build_ffn(ffn_inp,
  7382. model.layers[il].ffn_up, NULL, NULL,
  7383. model.layers[il].ffn_gate, NULL, NULL,
  7384. model.layers[il].ffn_down, NULL, NULL,
  7385. NULL,
  7386. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7387. cb(cur, "ffn_out", il);
  7388. }
  7389. // add together residual + FFN + self-attention
  7390. cur = ggml_add(ctx0, cur, inpL);
  7391. cur = ggml_add(ctx0, cur, attn_out);
  7392. cur = build_cvec(cur, il);
  7393. cb(cur, "l_out", il);
  7394. // input for next layer
  7395. inpL = cur;
  7396. }
  7397. cur = inpL;
  7398. cur = build_norm(cur,
  7399. model.output_norm, NULL,
  7400. LLM_NORM, -1);
  7401. cb(cur, "result_norm", -1);
  7402. res->t_embd = cur;
  7403. // lm_head
  7404. cur = build_lora_mm(model.output, cur);
  7405. if (f_logit_scale) {
  7406. cur = ggml_scale(ctx0, cur, f_logit_scale);
  7407. }
  7408. cb(cur, "result_output", -1);
  7409. res->t_logits = cur;
  7410. ggml_build_forward_expand(gf, cur);
  7411. }
  7412. };
  7413. struct llm_build_cohere2_iswa : public llm_graph_context {
  7414. llm_build_cohere2_iswa(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7415. const int64_t n_embd_head = hparams.n_embd_head_v;
  7416. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7417. const float f_logit_scale = hparams.f_logit_scale;
  7418. ggml_tensor * cur;
  7419. ggml_tensor * inpL;
  7420. inpL = build_inp_embd(model.tok_embd);
  7421. // inp_pos - contains the positions
  7422. ggml_tensor * inp_pos = build_inp_pos();
  7423. auto * inp_attn = build_attn_inp_kv_unified_iswa();
  7424. for (int il = 0; il < n_layer; ++il) {
  7425. const bool is_swa = hparams.is_swa(il);
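// note: RoPE is applied only on the sliding-window layers below; the full-attention
// layers run without positional encoding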
  7426. // norm
  7427. cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM, il);
  7428. cb(cur, "attn_norm", il);
  7429. ggml_tensor * ffn_inp = cur;
  7430. // self-attention
  7431. {
  7432. // rope freq factors for 128k context
  7433. ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
  7434. // compute Q and K and RoPE them
  7435. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7436. cb(Qcur, "Qcur", il);
  7437. if (model.layers[il].bq) {
  7438. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7439. cb(Qcur, "Qcur", il);
  7440. }
  7441. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7442. cb(Kcur, "Kcur", il);
  7443. if (model.layers[il].bk) {
  7444. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7445. cb(Kcur, "Kcur", il);
  7446. }
  7447. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7448. cb(Vcur, "Vcur", il);
  7449. if (model.layers[il].bv) {
  7450. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7451. cb(Vcur, "Vcur", il);
  7452. }
  7453. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7454. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7455. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7456. if (is_swa) {
  7457. Qcur = ggml_rope_ext(
  7458. ctx0, Qcur, inp_pos, rope_factors,
  7459. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7460. ext_factor, attn_factor, beta_fast, beta_slow
  7461. );
  7462. Kcur = ggml_rope_ext(
  7463. ctx0, Kcur, inp_pos, rope_factors,
  7464. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7465. ext_factor, attn_factor, beta_fast, beta_slow
  7466. );
  7467. }
  7468. cb(Qcur, "Qcur", il);
  7469. cb(Kcur, "Kcur", il);
  7470. cb(Vcur, "Vcur", il);
  7471. cur = build_attn(inp_attn, gf,
  7472. model.layers[il].wo, model.layers[il].bo,
  7473. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7474. }
  7475. if (il == n_layer - 1) {
  7476. // skip computing output for unused tokens
  7477. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7478. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7479. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7480. ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
  7481. }
  7482. ggml_tensor * attn_out = cur;
  7483. // feed-forward network
  7484. {
  7485. cur = build_ffn(ffn_inp, model.layers[il].ffn_up, NULL, NULL, model.layers[il].ffn_gate,
  7486. NULL, NULL, model.layers[il].ffn_down, NULL, NULL, NULL, LLM_FFN_SILU, LLM_FFN_PAR,
  7487. il);
  7488. cb(cur, "ffn_out", il);
  7489. }
  7490. // add together residual + FFN + self-attention
  7491. cur = ggml_add(ctx0, cur, inpL);
  7492. cur = ggml_add(ctx0, cur, attn_out);
  7493. cur = build_cvec(cur, il);
  7494. cb(cur, "l_out", il);
  7495. // input for next layer
  7496. inpL = cur;
  7497. }
  7498. cur = inpL;
  7499. cur = build_norm(cur, model.output_norm, NULL, LLM_NORM, -1);
  7500. cb(cur, "result_norm", -1);
  7501. res->t_embd = cur;
  7502. // lm_head
  7503. cur = build_lora_mm(model.output, cur);
  7504. if (f_logit_scale) {
  7505. cur = ggml_scale(ctx0, cur, f_logit_scale);
  7506. }
  7507. cb(cur, "result_output", -1);
  7508. res->t_logits = cur;
  7509. ggml_build_forward_expand(gf, cur);
  7510. }
  7511. };
  7512. // ref: https://allenai.org/olmo
  7513. // based on the original build_llama() function, changes:
  7514. // * non-parametric layer norm
  7515. // * clamp qkv
  7516. // * removed bias
  7517. // * removed MoE
  7518. struct llm_build_olmo : public llm_graph_context {
  7519. llm_build_olmo(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7520. const int64_t n_embd_head = hparams.n_embd_head_v;
  7521. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7522. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7523. ggml_tensor * cur;
  7524. ggml_tensor * inpL;
  7525. inpL = build_inp_embd(model.tok_embd);
  7526. // inp_pos - contains the positions
  7527. ggml_tensor * inp_pos = build_inp_pos();
  7528. auto * inp_attn = build_attn_inp_kv_unified();
  7529. for (int il = 0; il < n_layer; ++il) {
  7530. ggml_tensor * inpSA = inpL;
  7531. // norm
  7532. cur = build_norm(inpL,
  7533. NULL, NULL,
  7534. LLM_NORM, il);
  7535. cb(cur, "attn_norm", il);
  7536. // self-attention
  7537. {
  7538. // compute Q and K and RoPE them
  7539. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7540. cb(Qcur, "Qcur", il);
  7541. if (hparams.f_clamp_kqv > 0.0f) {
  7542. Qcur = ggml_clamp(ctx0, Qcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  7543. cb(Qcur, "Qcur", il);
  7544. }
  7545. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7546. cb(Kcur, "Kcur", il);
  7547. if (hparams.f_clamp_kqv > 0.0f) {
  7548. Kcur = ggml_clamp(ctx0, Kcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  7549. cb(Kcur, "Kcur", il);
  7550. }
  7551. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7552. cb(Vcur, "Vcur", il);
  7553. if (hparams.f_clamp_kqv > 0.0f) {
  7554. Vcur = ggml_clamp(ctx0, Vcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  7555. cb(Vcur, "Vcur", il);
  7556. }
  7557. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7558. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7559. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7560. Qcur = ggml_rope_ext(
  7561. ctx0, Qcur, inp_pos, nullptr,
  7562. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7563. ext_factor, attn_factor, beta_fast, beta_slow
  7564. );
  7565. Kcur = ggml_rope_ext(
  7566. ctx0, Kcur, inp_pos, nullptr,
  7567. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7568. ext_factor, attn_factor, beta_fast, beta_slow
  7569. );
  7570. cb(Qcur, "Qcur", il);
  7571. cb(Kcur, "Kcur", il);
  7572. cb(Vcur, "Vcur", il);
  7573. cur = build_attn(inp_attn, gf,
  7574. model.layers[il].wo, nullptr,
  7575. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7576. }
  7577. if (il == n_layer - 1) {
  7578. // skip computing output for unused tokens
  7579. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7580. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7581. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7582. }
  7583. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7584. cb(ffn_inp, "ffn_inp", il);
  7585. // feed-forward network
  7586. cur = build_norm(ffn_inp,
  7587. NULL, NULL,
  7588. LLM_NORM, il);
  7589. cb(cur, "ffn_norm", il);
  7590. cur = build_ffn(cur,
  7591. model.layers[il].ffn_up, NULL, NULL,
  7592. model.layers[il].ffn_gate, NULL, NULL,
  7593. model.layers[il].ffn_down, NULL, NULL,
  7594. NULL,
  7595. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7596. cb(cur, "ffn_out", il);
  7597. cur = ggml_add(ctx0, cur, ffn_inp);
  7598. cb(cur, "ffn_out", il);
  7599. cur = build_cvec(cur, il);
  7600. cb(cur, "l_out", il);
  7601. // input for next layer
  7602. inpL = cur;
  7603. }
  7604. cur = inpL;
  7605. cur = build_norm(cur,
  7606. NULL, NULL,
  7607. LLM_NORM, -1);
  7608. cb(cur, "result_norm", -1);
  7609. res->t_embd = cur;
  7610. // lm_head
  7611. cur = build_lora_mm(model.output, cur);
  7612. cb(cur, "result_output", -1);
  7613. res->t_logits = cur;
  7614. ggml_build_forward_expand(gf, cur);
  7615. }
  7616. };
  7617. struct llm_build_olmo2 : public llm_graph_context {
  7618. llm_build_olmo2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7619. const int64_t n_embd_head = hparams.n_embd_head_v;
  7620. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7621. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7622. ggml_tensor * cur;
  7623. ggml_tensor * inpL;
  7624. inpL = build_inp_embd(model.tok_embd);
  7625. // inp_pos - contains the positions
  7626. ggml_tensor * inp_pos = build_inp_pos();
  7627. auto * inp_attn = build_attn_inp_kv_unified();
  7628. for (int il = 0; il < n_layer; ++il) {
  7629. ggml_tensor * inpSA = inpL;
  7630. cur = inpL;
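// OLMo 2 uses a post-norm layout: there is no norm before attention (cur is taken from inpL
// directly); attn_post_norm / ffn_post_norm are applied to the sub-layer outputs before the
// residual adds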
7631. // self-attention
  7632. {
  7633. // compute Q and K and RoPE them
  7634. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7635. cb(Qcur, "Qcur", il);
  7636. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7637. cb(Kcur, "Kcur", il);
  7638. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7639. cb(Vcur, "Vcur", il);
  7640. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL,
  7641. LLM_NORM_RMS, il);
  7642. cb(Qcur, "Qcur_normed", il);
  7643. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL,
  7644. LLM_NORM_RMS, il);
  7645. cb(Kcur, "Kcur_normed", il);
  7646. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7647. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7648. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7649. Qcur = ggml_rope_ext(
  7650. ctx0, Qcur, inp_pos, nullptr,
  7651. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7652. ext_factor, attn_factor, beta_fast, beta_slow
  7653. );
  7654. Kcur = ggml_rope_ext(
  7655. ctx0, Kcur, inp_pos, nullptr,
  7656. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7657. ext_factor, attn_factor, beta_fast, beta_slow
  7658. );
  7659. cb(Qcur, "Qcur", il);
  7660. cb(Kcur, "Kcur", il);
  7661. cb(Vcur, "Vcur", il);
  7662. cur = build_attn(inp_attn, gf,
  7663. model.layers[il].wo, NULL,
  7664. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7665. }
  7666. cur = build_norm(cur,
  7667. model.layers[il].attn_post_norm, NULL,
  7668. LLM_NORM_RMS, il);
  7669. cb(cur, "attn_post_norm", il);
  7670. if (il == n_layer - 1) {
  7671. // skip computing output for unused tokens
  7672. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7673. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7674. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7675. }
  7676. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7677. cb(ffn_inp, "ffn_inp", il);
  7678. // feed-forward network
  7679. cur = build_ffn(ffn_inp,
  7680. model.layers[il].ffn_up, NULL, NULL,
  7681. model.layers[il].ffn_gate, NULL, NULL,
  7682. model.layers[il].ffn_down, NULL, NULL,
  7683. NULL,
  7684. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7685. cb(cur, "ffn_out", il);
  7686. cur = build_norm(cur,
  7687. model.layers[il].ffn_post_norm, NULL,
  7688. LLM_NORM_RMS, -1);
  7689. cb(cur, "ffn_post_norm", -1);
  7690. cur = ggml_add(ctx0, cur, ffn_inp);
  7691. cb(cur, "ffn_out", il);
  7692. cur = build_cvec(cur, il);
  7693. cb(cur, "l_out", il);
  7694. // input for next layer
  7695. inpL = cur;
  7696. }
  7697. cur = inpL;
  7698. cur = build_norm(cur,
  7699. model.output_norm, NULL,
  7700. LLM_NORM_RMS, -1);
  7701. cb(cur, "result_norm", -1);
  7702. res->t_embd = cur;
  7703. // lm_head
  7704. cur = build_lora_mm(model.output, cur);
  7705. cb(cur, "result_output", -1);
  7706. res->t_logits = cur;
  7707. ggml_build_forward_expand(gf, cur);
  7708. }
  7709. };
  7710. // based on the build_qwen2moe() function, changes:
  7711. // * removed shared experts
  7712. // * removed bias
  7713. // * added q, k norm
  7714. struct llm_build_olmoe : public llm_graph_context {
  7715. llm_build_olmoe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7716. const int64_t n_embd_head = hparams.n_embd_head_v;
  7717. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7718. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7719. ggml_tensor * cur;
  7720. ggml_tensor * inpL;
  7721. inpL = build_inp_embd(model.tok_embd);
  7722. // inp_pos - contains the positions
  7723. ggml_tensor * inp_pos = build_inp_pos();
  7724. auto * inp_attn = build_attn_inp_kv_unified();
  7725. for (int il = 0; il < n_layer; ++il) {
  7726. ggml_tensor * inpSA = inpL;
  7727. // norm
  7728. cur = build_norm(inpL,
  7729. model.layers[il].attn_norm, NULL,
  7730. LLM_NORM_RMS, il);
  7731. cb(cur, "attn_norm", il);
7732. // self-attention
  7733. {
  7734. // compute Q and K and RoPE them
  7735. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7736. cb(Qcur, "Qcur", il);
  7737. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7738. cb(Kcur, "Kcur", il);
  7739. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7740. cb(Vcur, "Vcur", il);
  7741. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL,
  7742. LLM_NORM_RMS, il);
  7743. cb(Qcur, "Qcur_normed", il);
  7744. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL,
  7745. LLM_NORM_RMS, il);
  7746. cb(Kcur, "Kcur_normed", il);
  7747. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7748. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7749. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7750. Qcur = ggml_rope_ext(
  7751. ctx0, Qcur, inp_pos, nullptr,
  7752. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7753. ext_factor, attn_factor, beta_fast, beta_slow
  7754. );
  7755. Kcur = ggml_rope_ext(
  7756. ctx0, Kcur, inp_pos, nullptr,
  7757. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7758. ext_factor, attn_factor, beta_fast, beta_slow
  7759. );
  7760. cb(Qcur, "Qcur", il);
  7761. cb(Kcur, "Kcur", il);
  7762. cb(Vcur, "Vcur", il);
  7763. cur = build_attn(inp_attn, gf,
  7764. model.layers[il].wo, NULL,
  7765. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7766. }
  7767. if (il == n_layer - 1) {
  7768. // skip computing output for unused tokens
  7769. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7770. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7771. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7772. }
  7773. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7774. cb(ffn_inp, "ffn_inp", il);
  7775. // MoE branch
  7776. cur = build_norm(ffn_inp,
  7777. model.layers[il].ffn_norm, NULL,
  7778. LLM_NORM_RMS, il);
  7779. cb(cur, "ffn_norm", il);
  7780. cur = build_moe_ffn(cur,
  7781. model.layers[il].ffn_gate_inp,
  7782. model.layers[il].ffn_up_exps,
  7783. model.layers[il].ffn_gate_exps,
  7784. model.layers[il].ffn_down_exps,
  7785. nullptr,
  7786. n_expert, n_expert_used,
  7787. LLM_FFN_SILU, false,
  7788. false, 0.0,
  7789. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  7790. il);
  7791. cb(cur, "ffn_moe_out", il);
  7792. cur = ggml_add(ctx0, cur, ffn_inp);
  7793. cur = build_cvec(cur, il);
  7794. cb(cur, "l_out", il);
  7795. // input for next layer
  7796. inpL = cur;
  7797. }
  7798. cur = inpL;
  7799. cur = build_norm(cur,
  7800. model.output_norm, NULL,
  7801. LLM_NORM_RMS, -1);
  7802. cb(cur, "result_norm", -1);
  7803. res->t_embd = cur;
  7804. // lm_head
  7805. cur = build_lora_mm(model.output, cur);
  7806. cb(cur, "result_output", -1);
  7807. res->t_logits = cur;
  7808. ggml_build_forward_expand(gf, cur);
  7809. }
  7810. };
  7811. struct llm_build_openelm : public llm_graph_context {
  7812. llm_build_openelm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7813. const int64_t n_embd_head = hparams.n_embd_head_v;
  7814. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7815. ggml_tensor * cur;
  7816. ggml_tensor * inpL;
  7817. inpL = build_inp_embd(model.tok_embd);
  7818. // inp_pos - contains the positions
  7819. ggml_tensor * inp_pos = build_inp_pos();
  7820. auto * inp_attn = build_attn_inp_kv_unified();
  7821. for (int il = 0; il < n_layer; ++il) {
  7822. const int64_t n_head = hparams.n_head(il);
  7823. const int64_t n_head_kv = hparams.n_head_kv(il);
  7824. const int64_t n_head_qkv = 2*n_head_kv + n_head;
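// OpenELM varies n_head and n_head_kv per layer; the fused QKV projection is reshaped to
// n_head_qkv = n_head + 2*n_head_kv heads and split into Q/K/V views below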
  7825. cur = inpL;
  7826. ggml_tensor * residual = cur;
  7827. // norm
  7828. cur = build_norm(inpL,
  7829. model.layers[il].attn_norm, NULL,
  7830. LLM_NORM_RMS, il);
  7831. cb(cur, "attn_norm", il);
  7832. // self-attention
  7833. {
  7834. cur = build_lora_mm(model.layers[il].wqkv, cur);
  7835. cb(cur, "wqkv", il);
  7836. cur = ggml_reshape_3d(ctx0, cur, n_embd_head_k, n_head_qkv, n_tokens);
  7837. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, cur->nb[1], cur->nb[2], 0));
  7838. cb(Qcur, "Qcur", il);
  7839. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*n_head));
  7840. cb(Kcur, "Kcur", il);
  7841. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*(n_head+n_head_kv)));
  7842. cb(Vcur, "Vcur", il);
  7843. Qcur = build_norm(Qcur,
  7844. model.layers[il].attn_q_norm, NULL,
  7845. LLM_NORM_RMS, il);
  7846. cb(Qcur, "Qcur", il);
  7847. Kcur = build_norm(Kcur,
  7848. model.layers[il].attn_k_norm, NULL,
  7849. LLM_NORM_RMS, il);
  7850. cb(Kcur, "Kcur", il);
  7851. Qcur = ggml_rope_ext(
  7852. ctx0, Qcur, inp_pos, NULL,
  7853. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7854. ext_factor, attn_factor, beta_fast, beta_slow
  7855. );
  7856. Kcur = ggml_rope_ext(
  7857. ctx0, Kcur, inp_pos, NULL,
  7858. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7859. ext_factor, attn_factor, beta_fast, beta_slow
  7860. );
  7861. cb(Qcur, "Qcur", il);
  7862. cb(Kcur, "Kcur", il);
  7863. cb(Qcur, "Vcur", il);
  7864. cur = build_attn(inp_attn, gf,
  7865. model.layers[il].wo, NULL,
  7866. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7867. }
  7868. if (il == n_layer - 1) {
  7869. // skip computing output for unused tokens
  7870. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7871. residual = ggml_get_rows(ctx0, residual, inp_out_ids);
  7872. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7873. }
  7874. ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
  7875. cb(ffn_inp, "ffn_inp", il);
  7876. // feed-forward network
  7877. {
  7878. cur = build_norm(ffn_inp,
  7879. model.layers[il].ffn_norm, NULL,
  7880. LLM_NORM_RMS, il);
  7881. cb(cur, "ffn_norm", il);
  7882. cur = build_ffn(cur,
  7883. model.layers[il].ffn_up, NULL, NULL,
  7884. model.layers[il].ffn_gate, NULL, NULL,
  7885. model.layers[il].ffn_down, NULL, NULL,
  7886. NULL,
  7887. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7888. cb(cur, "ffn_out", il);
  7889. }
  7890. cur = ggml_add(ctx0, cur, ffn_inp);
  7891. cur = build_cvec(cur, il);
  7892. cb(cur, "l_out", il);
  7893. inpL = cur;
  7894. }
  7895. cur = inpL;
  7896. // norm
  7897. cur = build_norm(cur,
  7898. model.output_norm, NULL,
  7899. LLM_NORM_RMS, -1);
  7900. cb(cur, "result_norm", -1);
  7901. res->t_embd = cur;
  7902. cur = build_lora_mm(model.output, cur);
  7903. cb(cur, "result_output", -1);
  7904. res->t_logits = cur;
  7905. ggml_build_forward_expand(gf, cur);
  7906. }
  7907. };
  7908. struct llm_build_gptneox : public llm_graph_context {
  7909. llm_build_gptneox(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7910. const int64_t n_embd_head = hparams.n_embd_head_v;
  7911. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  7912. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7913. ggml_tensor * cur;
  7914. ggml_tensor * inpL;
  7915. inpL = build_inp_embd(model.tok_embd);
  7916. // inp_pos - contains the positions
  7917. ggml_tensor * inp_pos = build_inp_pos();
  7918. auto * inp_attn = build_attn_inp_kv_unified();
  7919. for (int il = 0; il < n_layer; ++il) {
  7920. cur = build_norm(inpL,
  7921. model.layers[il].attn_norm,
  7922. model.layers[il].attn_norm_b,
  7923. LLM_NORM, il);
  7924. cb(cur, "attn_norm", il);
  7925. // self-attention
  7926. {
  7927. cur = build_lora_mm(model.layers[il].wqkv, cur);
  7928. cb(cur, "wqkv", il);
  7929. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  7930. cb(cur, "bqkv", il);
  7931. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  7932. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  7933. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  7934. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7935. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7936. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7937. Qcur = ggml_rope_ext(
  7938. ctx0, Qcur, inp_pos, nullptr,
  7939. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7940. ext_factor, attn_factor, beta_fast, beta_slow
  7941. );
  7942. Kcur = ggml_rope_ext(
  7943. ctx0, Kcur, inp_pos, nullptr,
  7944. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7945. ext_factor, attn_factor, beta_fast, beta_slow
  7946. );
  7947. cb(Qcur, "Qcur", il);
  7948. cb(Kcur, "Kcur", il);
  7949. cb(Vcur, "Vcur", il);
  7950. cur = build_attn(inp_attn, gf,
  7951. model.layers[il].wo, model.layers[il].bo,
  7952. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7953. }
  7954. if (il == n_layer - 1) {
  7955. // skip computing output for unused tokens
  7956. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7957. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7958. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7959. }
  7960. // ffn
  7961. if (hparams.use_par_res) {
  7962. // attention and ffn are computed in parallel
  7963. // x = x + attn(ln1(x)) + ffn(ln2(x))
  7964. ggml_tensor * attn_out = cur;
  7965. cur = build_norm(inpL,
  7966. model.layers[il].ffn_norm,
  7967. model.layers[il].ffn_norm_b,
  7968. LLM_NORM, il);
  7969. cb(cur, "ffn_norm", il);
  7970. cur = build_ffn(cur,
  7971. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  7972. NULL, NULL, NULL,
  7973. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  7974. NULL,
  7975. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  7976. cb(cur, "ffn_out", il);
  7977. cur = ggml_add(ctx0, cur, inpL);
  7978. cb(cur, "ffn_out", il);
  7979. cur = ggml_add(ctx0, cur, attn_out);
  7980. cur = build_cvec(cur, il);
  7981. cb(cur, "l_out", il);
  7982. // input for next layer
  7983. inpL = cur;
  7984. } else {
  7985. // attention and ffn are computed sequentially
  7986. // x = x + attn(ln1(x))
  7987. // x = x + ffn(ln2(x))
  7988. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  7989. cb(ffn_inp, "ffn_inp", il);
  7990. cur = build_norm(ffn_inp,
  7991. model.layers[il].ffn_norm,
  7992. model.layers[il].ffn_norm_b,
  7993. LLM_NORM, il);
  7994. cb(cur, "ffn_norm", il);
  7995. cur = build_ffn(cur,
  7996. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  7997. NULL, NULL, NULL,
  7998. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  7999. NULL,
  8000. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  8001. cb(cur, "ffn_out", il);
  8002. cur = ggml_add(ctx0, cur, ffn_inp);
  8003. cur = build_cvec(cur, il);
  8004. cb(cur, "l_out", il);
  8005. // input for next layer
  8006. inpL = cur;
  8007. }
  8008. }
  8009. cur = build_norm(inpL,
  8010. model.output_norm,
  8011. model.output_norm_b,
  8012. LLM_NORM, -1);
  8013. cb(cur, "result_norm", -1);
  8014. res->t_embd = cur;
  8015. cur = build_lora_mm(model.output, cur);
  8016. cb(cur, "result_output", -1);
  8017. res->t_logits = cur;
  8018. ggml_build_forward_expand(gf, cur);
  8019. }
  8020. };
struct llm_build_arctic : public llm_graph_context {
    llm_build_arctic(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        // inp_pos - contains the positions
        ggml_tensor * inp_pos = build_inp_pos();
        auto * inp_attn = build_attn_inp_kv_unified();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            // norm
            cur = build_norm(inpL,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                // compute Q and K and RoPE them
                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                cur = build_attn(inp_attn, gf,
                        model.layers[il].wo, NULL,
                        Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            // feed-forward network
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);
            cur = build_ffn(cur,
                    model.layers[il].ffn_up, NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
            ggml_tensor * ffn_out = ggml_add(ctx0, cur, ffn_inp);
            cb(ffn_out, "ffn_out", il);
            // MoE
            cur = build_norm(inpSA,
                    model.layers[il].ffn_norm_exps, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm_exps", il);
            cur = build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    nullptr,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, true,
                    false, 0.0,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                    il);
            cb(cur, "ffn_moe_out", il);
            cur = ggml_add(ctx0, cur, ffn_out);
            cb(cur, "ffn_out", il);
            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);
            // input for next layer
            inpL = cur;
        }
        cur = inpL;
        cur = build_norm(cur,
                model.output_norm, NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        // lm_head
        cur = build_lora_mm(model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
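// note: DeepSeek uses n_layer_dense_lead leading dense FFN layers; the remaining layers are MoE
//       with an additional shared-expert FFN added on top of the routed experts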
struct llm_build_deepseek : public llm_graph_context {
    llm_build_deepseek(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        // inp_pos - contains the positions
        ggml_tensor * inp_pos = build_inp_pos();
        auto * inp_attn = build_attn_inp_kv_unified();
        const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            // norm
            cur = build_norm(inpL,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                // rope freq factors for llama3; may return nullptr for llama2 and other models
                ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
                // compute Q and K and RoPE them
                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }
                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }
                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, rope_factors,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, rope_factors,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                cur = build_attn(inp_attn, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);
            if ((uint32_t) il < hparams.n_layer_dense_lead) {
                cur = build_ffn(cur,
                        model.layers[il].ffn_up, NULL, NULL,
                        model.layers[il].ffn_gate, NULL, NULL,
                        model.layers[il].ffn_down, NULL, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(cur, "ffn_out", il);
            } else {
                // MoE branch
                ggml_tensor * moe_out =
                    build_moe_ffn(cur,
                            model.layers[il].ffn_gate_inp,
                            model.layers[il].ffn_up_exps,
                            model.layers[il].ffn_gate_exps,
                            model.layers[il].ffn_down_exps,
                            nullptr,
                            n_expert, n_expert_used,
                            LLM_FFN_SILU, false,
                            false, hparams.expert_weights_scale,
                            LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                            il);
                cb(moe_out, "ffn_moe_out", il);
                // FFN shared expert
                {
                    ggml_tensor * ffn_shexp = build_ffn(cur,
                            model.layers[il].ffn_up_shexp, NULL, NULL,
                            model.layers[il].ffn_gate_shexp, NULL, NULL,
                            model.layers[il].ffn_down_shexp, NULL, NULL,
                            NULL,
                            LLM_FFN_SILU, LLM_FFN_PAR, il);
                    cb(ffn_shexp, "ffn_shexp", il);
                    cur = ggml_add(ctx0, moe_out, ffn_shexp);
                    cb(cur, "ffn_out", il);
                }
            }
            cur = ggml_add(ctx0, cur, ffn_inp);
            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);
            // input for next layer
            inpL = cur;
        }
        cur = inpL;
        cur = build_norm(cur,
                model.output_norm, NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        // lm_head
        cur = build_lora_mm(model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
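// note: deepseek2 supports two attention layouts: with MLA head sizes present, attention runs in
//       the compressed latent KV space (MQA-like, decompressed via wv_b inside build_attn);
//       otherwise the latent KV is expanded with wkv_b and regular MHA is used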
struct llm_build_deepseek2 : public llm_graph_context {
    llm_build_deepseek2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        bool is_lite = (hparams.n_layer == 27);
        const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);
        // note: these are the actual head sizes you get when treating as MHA or after "decompression" using wv_b for MLA
        const int64_t n_embd_head_k = is_mla ? hparams.n_embd_head_k_mla : hparams.n_embd_head_k;
        const int64_t n_embd_head_v = is_mla ? hparams.n_embd_head_v_mla : hparams.n_embd_head_v;
        const int64_t n_embd_head_qk_rope = hparams.n_rot;
        const int64_t n_embd_head_qk_nope = n_embd_head_k - n_embd_head_qk_rope;
        const uint32_t kv_lora_rank = hparams.n_lora_kv;
        // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.
        // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
        const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));
        const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(n_embd_head_k));
        const float attn_factor = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));
        ggml_tensor * cur;
        ggml_tensor * inpL;
        // {n_embd, n_tokens}
        inpL = build_inp_embd(model.tok_embd);
        // inp_pos - contains the positions
        ggml_tensor * inp_pos = build_inp_pos();
        auto * inp_attn = build_attn_inp_kv_unified();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            // norm
            cur = build_norm(inpL,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                ggml_tensor * q = NULL;
                if (!is_lite) {
                    q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
                    cb(q, "q", il);
                    q = build_norm(q,
                            model.layers[il].attn_q_a_norm, nullptr,
                            LLM_NORM_RMS, il);
                    cb(q, "q", il);
                    q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
                    cb(q, "q", il);
                } else {
                    q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                    cb(q, "q", il);
                }
                // split into {n_embd_head_qk_nope, n_head, n_tokens}
                ggml_tensor * q_nope = ggml_view_3d(ctx0, q,
                        n_embd_head_qk_nope, n_head, n_tokens,
                        ggml_row_size(q->type, n_embd_head_k),
                        ggml_row_size(q->type, n_embd_head_k) * n_head,
                        0);
                cb(q_nope, "q_nope", il);
                // and {n_embd_head_qk_rope, n_head, n_tokens}
                ggml_tensor * q_pe = ggml_view_3d(ctx0, q,
                        n_embd_head_qk_rope, n_head, n_tokens,
                        ggml_row_size(q->type, n_embd_head_k),
                        ggml_row_size(q->type, n_embd_head_k) * n_head,
                        ggml_row_size(q->type, n_embd_head_qk_nope));
                cb(q_pe, "q_pe", il);
                ggml_tensor * kv_cmpr_pe = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
                cb(kv_cmpr_pe, "kv_cmpr_pe", il);
                // split into {kv_lora_rank, n_tokens}
                ggml_tensor * kv_cmpr = ggml_view_2d(ctx0, kv_cmpr_pe,
                        kv_lora_rank, n_tokens,
                        ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                        0);
                cb(kv_cmpr, "kv_cmpr", il);
                // and {n_embd_head_qk_rope, 1, n_tokens}
                ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_cmpr_pe,
                        n_embd_head_qk_rope, 1, n_tokens,
                        ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                        ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                        ggml_row_size(kv_cmpr_pe->type, kv_lora_rank));
                cb(k_pe, "k_pe", il);
                q_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(q_pe, "q_pe", il);
                k_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(k_pe, "k_pe", il);
                kv_cmpr = build_norm(kv_cmpr,
                        model.layers[il].attn_kv_a_norm, nullptr,
                        LLM_NORM_RMS, il);
                cb(kv_cmpr, "kv_cmpr", il);
                if (is_mla) {
                    // {n_embd_head_qk_nope, n_tokens, n_head}
                    q_nope = ggml_permute(ctx0, q_nope, 0, 2, 1, 3);
                    cb(q_nope, "q_nope_perm", il);
                    // {n_embd_head_qk_nope, kv_lora_rank, n_head} x {n_embd_head_qk_nope, n_tokens, n_head}
                    ggml_tensor * q_nope_absorbed = ggml_mul_mat(ctx0, model.layers[il].wk_b, q_nope);
                    cb(q_nope_absorbed, "q_nope_absorbed", il);
                    // {kv_lora_rank, n_head, n_tokens}
                    q_nope_absorbed = ggml_permute(ctx0, q_nope_absorbed, 0, 2, 1, 3);
                    cb(q_nope_absorbed, "q_nope_absorbed_perm", il);
                    // {n_embd_head_qk_rope + kv_lora_rank, n_head, n_tokens}
                    // note: rope must go first for in-place context shifting in build_rope_shift()
                    ggml_tensor * Qcur = ggml_concat(ctx0, q_pe, q_nope_absorbed, 0);
                    cb(Qcur, "Qcur", il);
                    kv_cmpr = ggml_reshape_3d(ctx0, kv_cmpr, kv_lora_rank, 1, n_tokens);
                    cb(kv_cmpr, "kv_cmpr_reshape", il);
                    // {n_embd_head_qk_rope + kv_lora_rank, 1, n_tokens}
                    ggml_tensor * Kcur = ggml_concat(ctx0, k_pe, kv_cmpr, 0);
                    cb(Kcur, "Kcur", il);
                    // {kv_lora_rank, 1, n_tokens}
                    ggml_tensor * Vcur = kv_cmpr;
                    cb(Vcur, "Vcur", il);
                    // note: MLA with the absorption optimization converts into MQA (ie: GQA with 1 group)
                    cur = build_attn(inp_attn, gf,
                            model.layers[il].wo, NULL,
                            Qcur, Kcur, Vcur, nullptr, model.layers[il].wv_b, kq_scale, il);
                } else {
                    ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_cmpr);
                    cb(kv, "kv", il);
                    // split into {n_embd_head_qk_nope, n_head, n_tokens}
                    ggml_tensor * k_nope = ggml_view_3d(ctx0, kv,
                            n_embd_head_qk_nope, n_head, n_tokens,
                            ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v),
                            ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v) * n_head,
                            0);
                    cb(k_nope, "k_nope_view", il);
                    // and {n_embd_head_v, n_head, n_tokens}
                    ggml_tensor * Vcur = ggml_view_3d(ctx0, kv,
                            n_embd_head_v, n_head, n_tokens,
                            ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v),
                            ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v) * n_head,
                            ggml_row_size(kv->type, n_embd_head_qk_nope));
                    cb(Vcur, "Vcur_view", il);
                    Vcur = ggml_cont(ctx0, Vcur);
                    cb(Vcur, "Vcur_cont", il);
                    // note: rope must go first for in-place context shifting in build_rope_shift()
                    ggml_tensor * Qcur = ggml_concat(ctx0, q_pe, q_nope, 0);
                    cb(Qcur, "Qcur", il);
                    ggml_tensor * Kcur = ggml_concat(ctx0, ggml_repeat(ctx0, k_pe, q_pe), k_nope, 0);
                    cb(Kcur, "Kcur", il);
                    // note: MLA without the absorption optimization converts into MHA (ie: GQA with full n_head groups)
                    cur = build_attn(inp_attn, gf,
                            model.layers[il].wo, NULL,
                            Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
                }
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);
            if ((uint32_t) il < hparams.n_layer_dense_lead) {
                cur = build_ffn(cur,
                        model.layers[il].ffn_up, NULL, NULL,
                        model.layers[il].ffn_gate, NULL, NULL,
                        model.layers[il].ffn_down, NULL, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(cur, "ffn_out", il);
            } else {
                // MoE branch
                ggml_tensor * moe_out =
                    build_moe_ffn(cur,
                            model.layers[il].ffn_gate_inp,
                            model.layers[il].ffn_up_exps,
                            model.layers[il].ffn_gate_exps,
                            model.layers[il].ffn_down_exps,
                            model.layers[il].ffn_exp_probs_b,
                            n_expert, n_expert_used,
                            LLM_FFN_SILU, hparams.expert_weights_norm,
                            true, hparams.expert_weights_scale,
                            (llama_expert_gating_func_type) hparams.expert_gating_func,
                            il);
                cb(moe_out, "ffn_moe_out", il);
                // FFN shared expert
                {
                    ggml_tensor * ffn_shexp = build_ffn(cur,
                            model.layers[il].ffn_up_shexp, NULL, NULL,
                            model.layers[il].ffn_gate_shexp, NULL, NULL,
                            model.layers[il].ffn_down_shexp, NULL, NULL,
                            NULL,
                            LLM_FFN_SILU, LLM_FFN_PAR, il);
                    cb(ffn_shexp, "ffn_shexp", il);
                    cur = ggml_add(ctx0, moe_out, ffn_shexp);
                    cb(cur, "ffn_out", il);
                }
            }
            cur = ggml_add(ctx0, cur, ffn_inp);
            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);
            // input for next layer
            inpL = cur;
        }
        cur = inpL;
        cur = build_norm(cur,
                model.output_norm, NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
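// note: BitNet applies optional per-tensor scales (wq_scale, wk_scale, ...) after each projection
//       and adds extra sub-norms before the attention output and FFN down projections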
struct llm_build_bitnet : public llm_graph_context {
    llm_build_bitnet(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        // inp_pos - contains the positions
        ggml_tensor * inp_pos = build_inp_pos();
        auto * inp_attn = build_attn_inp_kv_unified();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            cur = build_norm(inpL,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                // compute Q and K and RoPE them
                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
                if (model.layers[il].wq_scale) {
                    Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_scale);
                }
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }
                // B1.K
                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
                if (model.layers[il].wk_scale) {
                    Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_scale);
                }
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }
                // B1.V
                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
                if (model.layers[il].wv_scale) {
                    Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_scale);
                }
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                cur = build_attn(inp_attn, gf,
                        NULL, NULL,
                        Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
                cur = build_norm(cur,
                        model.layers[il].attn_sub_norm, NULL,
                        LLM_NORM_RMS, il);
                cb(cur, "attn_sub_norm", il);
                cur = build_lora_mm(model.layers[il].wo, cur);
                if (model.layers[il].wo_scale) {
                    cur = ggml_mul(ctx0, cur, model.layers[il].wo_scale);
                }
                if (model.layers[il].bo) {
                    cur = ggml_add(ctx0, cur, model.layers[il].bo);
                }
                cb(cur, "attn_o_out", il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            // feed-forward network
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);
            cur = build_ffn(cur,
                    model.layers[il].ffn_up, NULL, model.layers[il].ffn_up_scale,
                    model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_scale,
                    NULL, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_sub_out", il);
            cur = build_norm(cur,
                    model.layers[il].ffn_sub_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_sub_norm", il);
            cur = build_lora_mm(model.layers[il].ffn_down, cur);
            if (model.layers[il].ffn_down_scale) {
                cur = ggml_mul(ctx0, cur, model.layers[il].ffn_down_scale);
            }
            cb(cur, "ffn_down", il);
            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);
            // input for next layer
            inpL = cur;
        }
        cur = inpL;
        cur = build_norm(cur,
                model.output_norm, NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        // lm_head
        // FIXME: do not use model.tok_embd directly, duplicate as model.output
        cur = build_lora_mm(model.tok_embd, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
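// note: the T5 encoder does not use RoPE; attention takes a relative position bias (kq_b) built
//       from pos_bucket_enc and attn_rel_b_enc (layer 0's bias is reused when a layer has none)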
struct llm_build_t5_enc : public llm_graph_context {
    llm_build_t5_enc(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        ggml_tensor * pos_bucket_enc = build_inp_pos_bucket_enc();
        auto * inp_attn = build_attn_inp_no_cache();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            // norm
            cur = build_norm(inpL,
                    model.layers[il].attn_norm_enc, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq_enc, cur);
                cb(Qcur, "Qcur", il);
                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk_enc, cur);
                cb(Kcur, "Kcur", il);
                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv_enc, cur);
                cb(Vcur, "Vcur", il);
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b_enc ? model.layers[il].attn_rel_b_enc : model.layers[0].attn_rel_b_enc;
                ggml_tensor * kq_b = build_pos_bias(pos_bucket_enc, attn_rel_b);
                cur = build_attn(inp_attn, gf,
                        model.layers[il].wo_enc, nullptr,
                        Qcur, Kcur, Vcur, kq_b, nullptr, 1.0f, il);
                cb(cur, "kqv_out", il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            // feed-forward network
            {
                cur = build_norm(ffn_inp,
                        model.layers[il].ffn_norm_enc, NULL,
                        LLM_NORM_RMS, il);
                cb(cur, "ffn_norm", il);
                // T5 uses relu, flan-T5 uses gelu-gated
                cur = build_ffn(cur,
                        model.layers[il].ffn_up_enc, NULL, NULL,
                        model.layers[il].ffn_gate_enc, NULL, NULL,
                        model.layers[il].ffn_down_enc, NULL, NULL,
                        NULL,
                        model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
                        model.layers[il].ffn_gate_enc ? LLM_FFN_PAR : LLM_FFN_SEQ,
                        il);
                cb(cur, "ffn_out", il);
            }
            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);
            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);
            // input for next layer
            inpL = cur;
        }
        cur = inpL;
        cb(cur, "result_embd", -1);
        cur = build_norm(cur,
                model.output_norm_enc, NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
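// note: the T5 decoder interleaves cached self-attention with cross-attention over the encoder
//       output (embd_enc); only the self-attention uses the relative position bias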
struct llm_build_t5_dec : public llm_graph_context {
    llm_build_t5_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        //const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        ggml_tensor * embd_enc       = build_inp_cross_embd();
        ggml_tensor * pos_bucket_dec = build_inp_pos_bucket_dec();
        const int64_t n_outputs_enc = embd_enc->ne[1];
        auto * inp_attn_self  = build_attn_inp_kv_unified();
        auto * inp_attn_cross = build_attn_inp_cross();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            // norm
            cur = build_norm(inpL,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? model.layers[il].attn_rel_b : model.layers[0].attn_rel_b;
                ggml_tensor * kq_b = build_pos_bias(pos_bucket_dec, attn_rel_b);
                cur = build_attn(inp_attn_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Qcur, Kcur, Vcur, kq_b, nullptr, 1.0f, il);
                cb(cur, "kqv_out", il);
            }
            cur = ggml_add(ctx0, cur, inpSA);
            cb(cur, "cross_inp", il);
            ggml_tensor * inpCA = cur;
            // norm
            cur = build_norm(cur,
                    model.layers[il].attn_norm_cross, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm_cross", il);
            // cross-attention
            {
                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq_cross, cur);
                cb(Qcur, "Qcur", il);
                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk_cross, embd_enc);
                cb(Kcur, "Kcur", il);
                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv_cross, embd_enc);
                cb(Vcur, "Vcur", il);
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_outputs_enc);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_outputs_enc);
                cur = build_attn(inp_attn_cross, gf,
                        model.layers[il].wo_cross, nullptr,
                        Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il);
                cb(cur, "kqv_out", il);
                //ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
                //ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));
                //ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
                //cb(kq, "kq", il);
                //kq = ggml_soft_max_ext(ctx0, kq, KQ_mask_cross, 1.0f, hparams.f_max_alibi_bias);
                //cb(kq, "kq_soft_max_ext", il);
                //ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_outputs_enc)));
                //cb(v, "v", il);
                //ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_outputs_enc, n_embd_head, n_head_kv), kq);
                //cb(kqv, "kqv", il);
                //ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
                //cb(kqv_merged, "kqv_merged", il);
                //cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
                //cb(cur, "kqv_merged_cont", il);
                //ggml_build_forward_expand(gf, cur);
                //cur = build_lora_mm(model.layers[il].wo_cross, cur);
                //cb(cur, "kqv_out", il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
                inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
            }
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpCA);
            cb(ffn_inp, "ffn_inp", il);
            // feed-forward network
            {
                cur = build_norm(ffn_inp,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, il);
                cb(cur, "ffn_norm", il);
                // T5 uses relu, flan-T5 uses gelu-gated
                cur = build_ffn(cur,
                        model.layers[il].ffn_up, NULL, NULL,
                        model.layers[il].ffn_gate, NULL, NULL,
                        model.layers[il].ffn_down, NULL, NULL,
                        NULL,
                        model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
                        model.layers[il].ffn_gate_enc ? LLM_FFN_PAR : LLM_FFN_SEQ,
                        il);
                cb(cur, "ffn_out", il);
            }
            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);
            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);
            // input for next layer
            inpL = cur;
        }
        cur = inpL;
        cb(cur, "result_embd", -1);
        cur = build_norm(cur,
                model.output_norm, NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        // lm_head
        cur = build_lora_mm(model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
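// note: JAIS uses a fused QKV projection and scales attention by 1/n_embd_head instead of 1/sqrt(n_embd_head)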
struct llm_build_jais : public llm_graph_context {
    llm_build_jais(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        auto * inp_attn = build_attn_inp_kv_unified();
        for (int il = 0; il < n_layer; ++il) {
            cur = build_norm(inpL,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                cur = build_lora_mm(model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);
                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);
                ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*cur->nb[0]*(n_embd)));
                ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd)));
                ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd + n_embd_gqa)));
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                cur = build_attn(inp_attn, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/float(n_embd_head), il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur  = ggml_get_rows(ctx0, cur,  inp_out_ids);
                inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
            }
            // add the input
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
            cb(ffn_inp, "ffn_inp", il);
            // FF
            {
                cur = build_norm(ffn_inp,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, il);
                cb(cur, "ffn_norm", il);
                cur = build_ffn(cur,
                        model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
                        model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(cur, "ffn_out", il);
            }
            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }
        cur = build_norm(inpL,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        cur = build_lora_mm(model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
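// note: ChatGLM checkpoints may provide either a fused wqkv tensor or separate wq/wk/wv tensors;
//       both cases are handled below before RoPE and attention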
struct llm_build_chatglm : public llm_graph_context {
    llm_build_chatglm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        // inp_pos - contains the positions
        ggml_tensor * inp_pos = build_inp_pos();
        auto * inp_attn = build_attn_inp_kv_unified();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            cur = build_norm(inpL,
                    model.layers[il].attn_norm,
                    NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                ggml_tensor * Qcur = nullptr;
                ggml_tensor * Kcur = nullptr;
                ggml_tensor * Vcur = nullptr;
                if (model.layers[il].wqkv == nullptr) {
                    Qcur = build_lora_mm(model.layers[il].wq, cur);
                    if (model.layers[il].bq) {
                        Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    }
                    Kcur = build_lora_mm(model.layers[il].wk, cur);
                    if (model.layers[il].bk) {
                        Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    }
                    Vcur = build_lora_mm(model.layers[il].wv, cur);
                    if (model.layers[il].bv) {
                        Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    }
                } else {
                    cur = build_lora_mm(model.layers[il].wqkv, cur);
                    cb(cur, "wqkv", il);
                    if (model.layers[il].bqkv) {
                        cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                        cb(cur, "bqkv", il);
                    }
                    Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                    Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                    Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                //printf("freq_base: %f freq_scale: %f ext_factor: %f attn_factor: %f\n", freq_base, freq_scale, ext_factor, attn_factor);
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                cur = build_attn(inp_attn, gf,
                        model.layers[il].wo, NULL,
                        Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            // Add the input
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            // FF
            {
                cur = build_norm(ffn_inp,
                        model.layers[il].ffn_norm,
                        NULL,
                        LLM_NORM_RMS, il);
                cb(cur, "ffn_norm", il);
                cur = build_ffn(cur,
                        model.layers[il].ffn_up, NULL, NULL,
                        NULL, NULL, NULL,
                        model.layers[il].ffn_down, NULL, NULL,
                        NULL,
                        LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
                cb(cur, "ffn_out", il);
            }
            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }
        cur = build_norm(inpL,
                model.output_norm,
                NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        cur = build_lora_mm(model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
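// note: GLM-4 uses "sandwich" normalization: an extra post-attention norm and post-MLP norm are
//       applied before the corresponding residual additions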
struct llm_build_glm4 : public llm_graph_context {
    llm_build_glm4(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        // inp_pos - contains the positions
        ggml_tensor * inp_pos = build_inp_pos();
        auto * inp_attn = build_attn_inp_kv_unified();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            // Pre-attention norm
            cur = build_norm(inpL,
                    model.layers[il].attn_norm,
                    NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                ggml_tensor * Qcur = nullptr;
                ggml_tensor * Kcur = nullptr;
                ggml_tensor * Vcur = nullptr;
                if (model.layers[il].wqkv == nullptr) {
                    Qcur = build_lora_mm(model.layers[il].wq, cur);
                    if (model.layers[il].bq) {
                        Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    }
                    Kcur = build_lora_mm(model.layers[il].wk, cur);
                    if (model.layers[il].bk) {
                        Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    }
                    Vcur = build_lora_mm(model.layers[il].wv, cur);
                    if (model.layers[il].bv) {
                        Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    }
                } else {
                    cur = build_lora_mm(model.layers[il].wqkv, cur);
                    cb(cur, "wqkv", il);
                    if (model.layers[il].bqkv) {
                        cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                        cb(cur, "bqkv", il);
                    }
                    Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                    Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                    Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                cur = build_attn(inp_attn, gf,
                        model.layers[il].wo, NULL,
                        Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            // Post-attention norm (new!)
            cur = build_norm(cur,
                    model.layers[il].attn_post_norm,
                    NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "post_attn_norm", il);
            // Add the input (residual connection after post-attention norm)
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            // FF
            {
                // Pre-MLP norm
                cur = build_norm(ffn_inp,
                        model.layers[il].ffn_norm,
                        NULL,
                        LLM_NORM_RMS, il);
                cb(cur, "ffn_norm", il);
                // MLP
                cur = build_ffn(cur,
                        model.layers[il].ffn_up, NULL, NULL,
                        NULL, NULL, NULL,
                        model.layers[il].ffn_down, NULL, NULL,
                        NULL,
                        LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
                cb(cur, "ffn_out", il);
                // Post-MLP norm
                cur = build_norm(cur,
                        model.layers[il].ffn_post_norm,
                        NULL,
                        LLM_NORM_RMS, il);
                cb(cur, "post_mlp_norm", il);
            }
            // Add residual connection after post-MLP norm
            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }
        // Final norm
        cur = build_norm(inpL,
                model.output_norm,
                NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        // Output projection
        cur = build_lora_mm(model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
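// note: Nemotron uses LayerNorm with bias (LLM_NORM) rather than RMSNorm, and a squared-ReLU FFN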
struct llm_build_nemotron : public llm_graph_context {
    llm_build_nemotron(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        //GGML_ASSERT(n_embd_head == hparams.n_rot);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        // inp_pos - contains the positions
        ggml_tensor * inp_pos = build_inp_pos();
        auto * inp_attn = build_attn_inp_kv_unified();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            // norm
            cur = build_norm(inpL,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                // compute Q and K and RoPE them
                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }
                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }
                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, nullptr,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                cur = build_attn(inp_attn, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            // feed-forward network
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm,
                    model.layers[il].ffn_norm_b,
                    LLM_NORM, il);
            cb(cur, "ffn_norm", il);
            cur = build_ffn(cur,
                    model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
                    NULL, NULL, NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                    NULL,
                    LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il);
            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);
            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);
            // input for next layer
            inpL = cur;
        }
        cur = inpL;
        cur = build_norm(cur,
                model.output_norm, model.output_norm_b,
                LLM_NORM, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        // lm_head
        cur = build_lora_mm(model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
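// note: EXAONE follows the standard llama-style block: RMSNorm, RoPE with optional frequency
//       factors, and a SiLU-gated FFN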
struct llm_build_exaone : public llm_graph_context {
    llm_build_exaone(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);
        ggml_tensor * cur;
        ggml_tensor * inpL;
        inpL = build_inp_embd(model.tok_embd);
        // inp_pos - contains the positions
        ggml_tensor * inp_pos = build_inp_pos();
        auto * inp_attn = build_attn_inp_kv_unified();
        for (int il = 0; il < n_layer; ++il) {
            ggml_tensor * inpSA = inpL;
            // norm
            cur = build_norm(inpL,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            // self-attention
            {
                // rope freq factors for llama3; may return nullptr for llama2 and other models
                ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
                // compute Q and K and RoPE them
                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }
                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }
                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, rope_factors,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, rope_factors,
                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);
                cur = build_attn(inp_attn, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
            }
            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            // feed-forward network
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);
            cur = build_ffn(cur,
                    model.layers[il].ffn_up, NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);
            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);
            // input for next layer
            inpL = cur;
        }
        cur = inpL;
        cur = build_norm(cur,
                model.output_norm, NULL,
                LLM_NORM_RMS, -1);
        cb(cur, "result_norm", -1);
        res->t_embd = cur;
        // lm_head
        cur = build_lora_mm(model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;
        ggml_build_forward_expand(gf, cur);
    }
};
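// note: common base for the RWKV6 family; provides the channel-mix and time-mix (WKV) helpers,
//       which update a recurrent state instead of a KV cache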
  9302. struct llm_build_rwkv6_base : public llm_graph_context {
  9303. const llama_model & model;
  9304. llm_build_rwkv6_base(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params), model(model) {
  9305. }
  9306. ggml_tensor * build_rwkv6_channel_mix(
  9307. const llama_layer * layer,
  9308. ggml_tensor * cur,
  9309. ggml_tensor * x_prev,
  9310. llm_arch arch) const {
  9311. ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur);
  9312. switch (arch) {
  9313. case LLM_ARCH_RWKV6:
  9314. {
  9315. ggml_tensor * xk = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_k), cur);
  9316. ggml_tensor * xr = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_r), cur);
  9317. ggml_tensor * r = ggml_sigmoid(ctx0, build_lora_mm(layer->channel_mix_receptance, xr));
  9318. ggml_tensor * k = ggml_sqr(
  9319. ctx0,
  9320. ggml_relu(
  9321. ctx0,
  9322. build_lora_mm(layer->channel_mix_key, xk)
  9323. )
  9324. );
  9325. cur = ggml_mul(ctx0, r, build_lora_mm(layer->channel_mix_value, k));
  9326. } break;
  9327. default:
  9328. GGML_ABORT("fatal error");
  9329. }
  9330. return cur;
  9331. }
  9332. ggml_tensor * build_rwkv6_time_mix(
  9333. ggml_cgraph * gf,
  9334. ggml_tensor * cur,
  9335. ggml_tensor * x_prev,
  9336. ggml_tensor * state_copy,
  9337. const llama_ubatch & ubatch,
  9338. int il) const {
  9339. const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
  9340. const auto n_tokens = ubatch.n_tokens;
  9341. const auto n_seqs = ubatch.n_seqs;
  9342. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9343. const auto n_embd = hparams.n_embd;
  9344. const auto head_size = hparams.wkv_head_size;
  9345. const auto n_head = n_embd / head_size;
  9346. const auto n_head_kv = hparams.n_head_kv(il);
  9347. const auto kv_head = kv_state->get_head();
  9348. const auto & layer = model.layers[il];
  9349. bool is_qrwkv = layer.time_mix_first == nullptr;
  9350. ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur);
  9351. sx = ggml_reshape_2d(ctx0, sx, n_embd, n_tokens);
  9352. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9353. ggml_tensor * xxx = ggml_add(ctx0, ggml_mul(ctx0, sx, layer.time_mix_lerp_x), cur);
  9354. xxx = ggml_reshape_4d(
  9355. ctx0,
  9356. ggml_tanh(
  9357. ctx0,
  9358. ggml_mul_mat(ctx0, layer.time_mix_w1, xxx)
  9359. ),
  9360. layer.time_mix_w1->ne[1] / 5, 1, 5, n_tokens
  9361. );
  9362. xxx = ggml_cont(ctx0, ggml_permute(ctx0, xxx, 0, 1, 3, 2));
  9363. xxx = ggml_mul_mat(
  9364. ctx0,
  9365. ggml_reshape_4d(
  9366. ctx0,
  9367. layer.time_mix_w2,
  9368. layer.time_mix_w2->ne[0], layer.time_mix_w2->ne[1], 1, 5
  9369. ),
  9370. xxx
  9371. );
  9372. ggml_tensor *xw, *xk, *xv, *xr, *xg;
  9373. if (layer.time_mix_lerp_fused) {
  9374. // fusing these weights makes some performance improvement
  9375. sx = ggml_reshape_3d(ctx0, sx, n_embd, 1, n_tokens);
  9376. cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens);
  9377. xxx = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xxx, layer.time_mix_lerp_fused), sx), cur);
  9378. xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0);
  9379. xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float));
  9380. xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float));
  9381. xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float));
  9382. xg = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float));
  9383. } else {
  9384. // for backward compatibility
  9385. xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0);
  9386. xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float));
  9387. xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float));
  9388. xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float));
  9389. xg = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float));
  9390. xw = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xw, layer.time_mix_lerp_w), sx), cur);
  9391. xk = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xk, layer.time_mix_lerp_k), sx), cur);
  9392. xv = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xv, layer.time_mix_lerp_v), sx), cur);
  9393. xr = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xr, layer.time_mix_lerp_r), sx), cur);
  9394. xg = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xg, layer.time_mix_lerp_g), sx), cur);
  9395. }
  9396. ggml_tensor * r = build_lora_mm(layer.time_mix_receptance, xr);
  9397. ggml_tensor * k = build_lora_mm(layer.time_mix_key, xk);
  9398. ggml_tensor * v = build_lora_mm(layer.time_mix_value, xv);
  9399. if (layer.time_mix_receptance_b) {
  9400. r = ggml_add(ctx0, r, layer.time_mix_receptance_b);
  9401. }
  9402. if (layer.time_mix_key_b) {
  9403. k = ggml_add(ctx0, k, layer.time_mix_key_b);
  9404. }
  9405. if (layer.time_mix_value_b) {
  9406. v = ggml_add(ctx0, v, layer.time_mix_value_b);
  9407. }
  9408. ggml_tensor * g = build_lora_mm(layer.time_mix_gate, xg);
  9409. if (is_qrwkv) {
  9410. g = ggml_sigmoid(ctx0, g);
  9411. } else {
  9412. g = ggml_silu(ctx0, g);
  9413. }
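// if the model has fewer K/V heads than query heads (GQA-style), k and v are
// repeated up to the full head count before the wkv op below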
  9414. if (n_head_kv != 0 && n_head_kv != n_head) {
  9415. GGML_ASSERT(n_head % n_head_kv == 0);
  9416. k = ggml_reshape_4d(ctx0, k, head_size, 1, n_head_kv, n_tokens);
  9417. v = ggml_reshape_4d(ctx0, v, head_size, 1, n_head_kv, n_tokens);
  9418. ggml_tensor * tmp = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, head_size, n_head / n_head_kv, n_head_kv, n_tokens);
  9419. k = ggml_repeat(ctx0, k, tmp);
  9420. v = ggml_repeat(ctx0, v, tmp);
  9421. }
  9422. k = ggml_reshape_3d(ctx0, k, head_size, n_head, n_tokens);
  9423. v = ggml_reshape_3d(ctx0, v, head_size, n_head, n_tokens);
  9424. r = ggml_reshape_3d(ctx0, r, head_size, n_head, n_tokens);
  9425. ggml_tensor * w = ggml_mul_mat(
  9426. ctx0,
  9427. layer.time_mix_decay_w2,
  9428. ggml_tanh(
  9429. ctx0,
  9430. ggml_mul_mat(ctx0, layer.time_mix_decay_w1, xw)
  9431. )
  9432. );
  9433. w = ggml_add(ctx0, w, layer.time_mix_decay);
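// the line below maps the unconstrained decay logits into (0, 1): w = exp(-exp(w))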
  9434. w = ggml_exp(ctx0, ggml_neg(ctx0, ggml_exp(ctx0, w)));
  9435. w = ggml_reshape_3d(ctx0, w, head_size, n_head, n_tokens);
  9436. if (is_qrwkv) {
  9437. // k = k * (1 - w)
  9438. k = ggml_sub(ctx0, k, ggml_mul(ctx0, k, w));
  9439. }
  9440. ggml_tensor * wkv_state = build_recurrent_state(
  9441. gf, kv_state->get_v_l(il), state_copy,
  9442. hparams.n_embd_v_s(), n_seqs);
  9443. ggml_tensor * wkv_output;
  9444. if (is_qrwkv) {
  9445. wkv_output = ggml_gated_linear_attn(ctx0, k, v, r, w, wkv_state, pow(head_size, -0.5f));
  9446. } else {
  9447. wkv_output = ggml_rwkv_wkv6(ctx0, k, v, r, layer.time_mix_first, w, wkv_state);
  9448. }
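// wkv_output packs the per-token outputs first (n_embd * n_tokens floats) followed by
// the updated recurrent state, which is copied back into the recurrent cache below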
  9449. cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0);
  9450. wkv_state = ggml_view_1d(ctx0, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float));
  9451. ggml_build_forward_expand(
  9452. gf,
  9453. ggml_cpy(
  9454. ctx0,
  9455. wkv_state,
  9456. ggml_view_1d(
  9457. ctx0,
  9458. kv_state->get_v_l(il),
  9459. hparams.n_embd_v_s() * n_seqs,
  9460. hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il))
  9461. )
  9462. )
  9463. );
  9464. if (!is_qrwkv) {
// group norm with n_head groups
  9466. cur = ggml_reshape_3d(ctx0, cur, n_embd / n_head, n_head, n_tokens);
  9467. cur = ggml_norm(ctx0, cur, 64e-5f);
  9468. // Convert back to regular vectors.
  9469. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9470. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, layer.time_mix_ln), layer.time_mix_ln_b);
  9471. } else {
  9472. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9473. }
  9474. cur = ggml_mul(ctx0, cur, g);
  9475. cur = build_lora_mm(layer.time_mix_output, cur);
  9476. return ggml_reshape_3d(ctx0, cur, n_embd, n_seq_tokens, n_seqs);
  9477. }
  9478. };
  9479. struct llm_build_rwkv6 : public llm_build_rwkv6_base {
  9480. llm_build_rwkv6(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) {
  9481. GGML_ASSERT(hparams.token_shift_count == 2);
  9482. ggml_tensor * cur;
  9483. ggml_tensor * inpL;
  9484. inpL = build_inp_embd(model.tok_embd);
  9485. inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
  9486. ggml_tensor * state_copy = build_inp_s_copy();
  9487. const auto n_embd = hparams.n_embd;
  9488. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9489. const auto n_seqs = ubatch.n_seqs;
  9490. for (int il = 0; il < n_layer; ++il) {
  9491. const llama_layer * layer = &model.layers[il];
  9492. inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
  9493. ggml_tensor * token_shift = build_rwkv_token_shift_load(
  9494. gf, state_copy, ubatch, il
  9495. );
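// token_shift carries two shifted hidden states per layer (token_shift_count == 2):
// one for the time-mix (attention) block and one for the channel-mix (FFN) block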
  9496. ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
  9497. ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
  9498. ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM, il);
  9499. cb(att_norm, "attn_norm", il);
  9500. ggml_tensor * x_prev = ggml_concat(
  9501. ctx0,
  9502. att_shift,
  9503. ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0),
  9504. 1
  9505. );
  9506. cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il);
  9507. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9508. cb(ffn_inp, "ffn_inp", il);
  9509. ggml_tensor * ffn_norm = build_norm(ffn_inp, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, il);
  9510. cb(ffn_norm, "ffn_norm", il);
  9511. x_prev = ggml_concat(
  9512. ctx0,
  9513. ffn_shift,
  9514. ggml_view_3d(ctx0, ffn_norm, n_embd, n_seq_tokens - 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], 0),
  9515. 1
  9516. );
  9517. token_shift = ggml_concat(ctx0,
  9518. ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)),
  9519. ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(ffn_norm)),
  9520. 1
  9521. );
  9522. ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
  9523. if (il == n_layer - 1) {
  9524. // skip computing output for unused tokens
  9525. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9526. ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
  9527. ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids);
  9528. x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids);
  9529. cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
  9530. }
  9531. cur = build_rwkv6_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV6);
  9532. cur = ggml_add(ctx0, cur, ffn_inp);
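// checkpoints exported with rescale_every_n_layers halve the activations every n layers;
// presumably this keeps magnitudes in a safe range for fp16 inference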
  9533. if (hparams.rescale_every_n_layers != 0 && (il + 1) % hparams.rescale_every_n_layers == 0) {
  9534. cur = ggml_scale(ctx0, cur, 0.5F);
  9535. }
  9536. cur = build_cvec(cur, il);
  9537. cb(cur, "l_out", il);
  9538. // input for next layer
  9539. inpL = cur;
  9540. }
  9541. cur = inpL;
  9542. cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1);
  9543. cb(cur, "result_norm", -1);
  9544. res->t_embd = cur;
  9545. cur = build_lora_mm(model.output, cur);
  9546. cb(cur, "result_output", -1);
  9547. res->t_logits = cur;
  9548. ggml_build_forward_expand(gf, cur);
  9549. }
  9550. };
  9551. // ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py
  9552. struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base {
  9553. llm_build_rwkv6qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) {
  9554. GGML_ASSERT(n_embd == hparams.n_embd_k_s());
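// unlike llm_build_rwkv6, this hybrid keeps a single shifted state per layer (no separate
// FFN shift) and uses a standard SwiGLU feed-forward block instead of channel-mix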
  9555. ggml_tensor * cur;
  9556. ggml_tensor * inpL;
  9557. inpL = build_inp_embd(model.tok_embd);
  9558. ggml_tensor * state_copy = build_inp_s_copy();
  9559. const auto n_embd = hparams.n_embd;
  9560. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9561. const auto n_seqs = ubatch.n_seqs;
  9562. for (int il = 0; il < n_layer; ++il) {
  9563. const llama_layer * layer = &model.layers[il];
  9564. inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
  9565. ggml_tensor * token_shift = build_rwkv_token_shift_load(
  9566. gf, state_copy, ubatch, il
  9567. );
  9568. ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il);
  9569. cb(att_norm, "attn_norm", il);
  9570. ggml_tensor * x_prev = ggml_concat(
  9571. ctx0,
  9572. token_shift,
  9573. ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0),
  9574. 1
  9575. );
  9576. cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il);
  9577. token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm));
  9578. ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
  9579. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9580. cb(ffn_inp, "ffn_inp", il);
  9581. if (il == n_layer - 1) {
  9582. // skip computing output for unused tokens
  9583. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9584. cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
  9585. ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
  9586. }
  9587. // feed-forward network
  9588. cur = build_norm(ffn_inp,
  9589. model.layers[il].ffn_norm, NULL,
  9590. LLM_NORM_RMS, il);
  9591. cb(cur, "ffn_norm", il);
  9592. cur = build_ffn(cur,
  9593. model.layers[il].ffn_up, NULL, NULL,
  9594. model.layers[il].ffn_gate, NULL, NULL,
  9595. model.layers[il].ffn_down, NULL, NULL,
  9596. NULL,
  9597. LLM_FFN_SILU, LLM_FFN_PAR, il);
  9598. cb(cur, "ffn_out", il);
  9599. cur = ggml_add(ctx0, cur, ffn_inp);
  9600. cur = build_cvec(cur, il);
  9601. cb(cur, "l_out", il);
  9602. // input for next layer
  9603. inpL = cur;
  9604. }
  9605. cur = inpL;
  9606. cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM_RMS, -1);
  9607. cb(cur, "result_norm", -1);
  9608. res->t_embd = cur;
  9609. cur = build_lora_mm(model.output, cur);
  9610. cb(cur, "result_output", -1);
  9611. res->t_logits = cur;
  9612. ggml_build_forward_expand(gf, cur);
  9613. }
  9614. };
  9615. struct llm_build_rwkv7_base : public llm_graph_context {
  9616. const llama_model & model;
  9617. llm_build_rwkv7_base(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params), model(model) {
  9618. }
  9619. ggml_tensor * build_rwkv7_channel_mix(
  9620. const llama_layer * layer,
  9621. ggml_tensor * cur,
  9622. ggml_tensor * x_prev,
  9623. llm_arch arch) const {
  9624. ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur);
  9625. switch (arch) {
  9626. case LLM_ARCH_RWKV7:
  9627. {
  9628. ggml_tensor * xk = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_k), cur);
  9629. ggml_tensor * k = ggml_sqr(
  9630. ctx0,
  9631. ggml_relu(
  9632. ctx0,
  9633. build_lora_mm(layer->channel_mix_key, xk)
  9634. )
  9635. );
  9636. cur = build_lora_mm(layer->channel_mix_value, k);
  9637. } break;
  9638. default:
  9639. GGML_ABORT("fatal error");
  9640. }
  9641. return cur;
  9642. }
  9643. ggml_tensor * build_rwkv7_time_mix(
  9644. ggml_cgraph * gf,
  9645. ggml_tensor * cur,
  9646. ggml_tensor * x_prev,
  9647. ggml_tensor * state_copy,
  9648. ggml_tensor *& first_layer_value,
  9649. const llama_ubatch & ubatch,
  9650. int il) const {
  9651. const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
  9652. const auto n_tokens = ubatch.n_tokens;
  9653. const auto n_seqs = ubatch.n_seqs;
  9654. const auto n_embd = hparams.n_embd;
  9655. const auto head_size = hparams.wkv_head_size;
  9656. const auto head_count = n_embd / head_size;
  9657. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9658. const auto kv_head = kv_state->get_head();
  9659. const auto & layer = model.layers[il];
  9660. bool has_gating = layer.time_mix_g1 && layer.time_mix_g2;
  9661. ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur);
  9662. ggml_tensor * dummy = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_embd, n_seq_tokens, n_seqs, has_gating ? 6 : 5);
  9663. sx = ggml_repeat(ctx0, sx, dummy);
  9664. ggml_tensor * xxx = ggml_add(ctx0, ggml_mul(ctx0, sx, layer.time_mix_lerp_fused), cur);
  9665. ggml_tensor * xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0);
  9666. ggml_tensor * xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float));
  9667. ggml_tensor * xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float));
  9668. ggml_tensor * xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float));
  9669. ggml_tensor * xa = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float));
  9670. ggml_tensor * xg = has_gating ? ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 5 * sizeof(float)) : nullptr;
  9671. ggml_tensor * r = build_lora_mm(layer.time_mix_receptance, xr);
  9672. ggml_tensor * w = ggml_add(
  9673. ctx0,
  9674. ggml_mul_mat(ctx0, layer.time_mix_w2, ggml_tanh(ctx0, ggml_mul_mat(ctx0, layer.time_mix_w1, xw))),
  9675. layer.time_mix_w0
  9676. );
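// the line below bounds the decay as w = exp(-exp(-0.5) * sigmoid(w)), i.e. roughly in
// (0.545, 1); the constant 0.606531 ~= exp(-0.5)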
  9677. w = ggml_exp(ctx0, ggml_scale(ctx0, ggml_sigmoid(ctx0, w), -0.606531));
  9678. ggml_tensor * k = build_lora_mm(layer.time_mix_key, xk);
  9679. ggml_tensor * v = build_lora_mm(layer.time_mix_value, xv);
  9680. if (first_layer_value == nullptr) {
  9681. first_layer_value = v;
  9682. } else {
  9683. // Add the first layer value as a residual connection.
  9684. v = ggml_add(ctx0, v,
  9685. ggml_mul(ctx0,
  9686. ggml_sub(ctx0, first_layer_value, v),
  9687. ggml_sigmoid(ctx0, ggml_add(ctx0,
  9688. ggml_mul_mat(ctx0, layer.time_mix_v2, ggml_mul_mat(ctx0, layer.time_mix_v1, xv)),
  9689. layer.time_mix_v0
  9690. )
  9691. )
  9692. )
  9693. );
  9694. }
  9695. ggml_tensor * g = nullptr;
  9696. if (layer.time_mix_g1 && layer.time_mix_g2) {
  9697. g = ggml_mul_mat(ctx0, layer.time_mix_g2, ggml_sigmoid(ctx0, ggml_mul_mat(ctx0, layer.time_mix_g1, xg)));
  9698. }
  9699. ggml_tensor * a = ggml_sigmoid(ctx0,
  9700. ggml_add(
  9701. ctx0,
  9702. ggml_mul_mat(ctx0, layer.time_mix_a2, ggml_mul_mat(ctx0, layer.time_mix_a1, xa)),
  9703. layer.time_mix_a0
  9704. )
  9705. );
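// kk is the L2-normalized key direction used by the wkv7 delta-rule removal term below,
// while a (acting as a per-channel in-context learning rate) rescales k as k * (1 + k_a*(a - 1))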
  9706. ggml_tensor * kk = ggml_reshape_3d(ctx0, ggml_mul(ctx0, k, layer.time_mix_k_k), head_size, head_count, n_tokens);
  9707. kk = ggml_l2_norm(ctx0, kk, 1e-12);
  9708. ggml_tensor * ka = ggml_mul(ctx0, k, layer.time_mix_k_a);
  9709. k = ggml_add(ctx0, k, ggml_sub(ctx0, ggml_mul(ctx0, a, ka), ka));
  9710. r = ggml_reshape_3d(ctx0, r, head_size, head_count, n_tokens);
  9711. w = ggml_reshape_3d(ctx0, w, head_size, head_count, n_tokens);
  9712. k = ggml_reshape_3d(ctx0, k, head_size, head_count, n_tokens);
  9713. v = ggml_reshape_3d(ctx0, v, head_size, head_count, n_tokens);
  9714. a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens);
  9715. ggml_tensor * wkv_state = build_recurrent_state(
  9716. gf, kv_state->get_v_l(il), state_copy,
  9717. hparams.n_embd_v_s(), n_seqs);
  9718. ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state);
  9719. cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0);
  9720. wkv_state = ggml_view_1d(ctx0, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float));
  9721. ggml_build_forward_expand(
  9722. gf,
  9723. ggml_cpy(
  9724. ctx0,
  9725. wkv_state,
  9726. ggml_view_1d(
  9727. ctx0,
  9728. kv_state->get_v_l(il),
  9729. hparams.n_embd_v_s() * n_seqs,
  9730. hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il))
  9731. )
  9732. )
  9733. );
  9734. if (layer.time_mix_ln && layer.time_mix_ln_b) {
  9735. // group norm with head_count groups
  9736. cur = ggml_reshape_3d(ctx0, cur, n_embd / head_count, head_count, n_tokens);
  9737. cur = ggml_norm(ctx0, cur, 64e-5f);
  9738. // Convert back to regular vectors.
  9739. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9740. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, layer.time_mix_ln), layer.time_mix_ln_b);
  9741. } else {
  9742. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9743. }
  9744. ggml_tensor * rk = ggml_sum_rows(ctx0,
  9745. ggml_mul(ctx0, ggml_mul(ctx0, k, r), ggml_reshape_2d(ctx0, layer.time_mix_r_k, head_size, head_count)));
  9746. cur = ggml_add(ctx0, cur, ggml_reshape_2d(ctx0, ggml_mul(ctx0, v, rk), n_embd, n_tokens));
  9747. if (has_gating) {
  9748. cur = ggml_mul(ctx0, cur, g);
  9749. }
  9750. cur = build_lora_mm(layer.time_mix_output, cur);
  9751. return ggml_reshape_3d(ctx0, cur, n_embd, n_seq_tokens, n_seqs);
  9752. }
  9753. };
  9754. struct llm_build_rwkv7 : public llm_build_rwkv7_base {
  9755. llm_build_rwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) {
  9756. GGML_ASSERT(hparams.token_shift_count == 2);
  9757. ggml_tensor * cur;
  9758. ggml_tensor * inpL;
  9759. ggml_tensor * v_first = nullptr;
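// v_first is set by the first layer's build_rwkv7_time_mix call and reused by later
// layers as the target of the learned value-residual mix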
  9760. inpL = build_inp_embd(model.tok_embd);
  9761. inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
  9762. ggml_tensor * state_copy = build_inp_s_copy();
  9763. const auto n_embd = hparams.n_embd;
  9764. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9765. const auto n_seqs = ubatch.n_seqs;
  9766. for (int il = 0; il < n_layer; ++il) {
  9767. const llama_layer * layer = &model.layers[il];
  9768. inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
  9769. ggml_tensor * token_shift = build_rwkv_token_shift_load(
  9770. gf, state_copy, ubatch, il
  9771. );
  9772. ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
  9773. ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
  9774. ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM, il);
  9775. cb(att_norm, "attn_norm", il);
  9776. ggml_tensor * x_prev = ggml_concat(
  9777. ctx0,
  9778. att_shift,
  9779. ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0),
  9780. 1
  9781. );
  9782. cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il);
  9783. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9784. cb(ffn_inp, "ffn_inp", il);
  9785. ggml_tensor * ffn_norm = build_norm(ffn_inp, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, il);
  9786. cb(ffn_norm, "ffn_norm", il);
  9787. x_prev = ggml_concat(
  9788. ctx0,
  9789. ffn_shift,
  9790. ggml_view_3d(ctx0, ffn_norm, n_embd, n_seq_tokens - 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], 0),
  9791. 1
  9792. );
  9793. token_shift = ggml_concat(ctx0,
  9794. ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)),
  9795. ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(ffn_norm)),
  9796. 1
  9797. );
  9798. ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
  9799. if (il == n_layer - 1) {
  9800. // skip computing output for unused tokens
  9801. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9802. ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
  9803. ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids);
  9804. x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids);
  9805. }
  9806. cur = build_rwkv7_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV7);
  9807. cur = ggml_add(ctx0, cur, ffn_inp);
  9808. cur = build_cvec(cur, il);
  9809. cb(cur, "l_out", il);
  9810. // input for next layer
  9811. inpL = cur;
  9812. }
  9813. cur = inpL;
  9814. cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1);
  9815. cb(cur, "result_norm", -1);
  9816. res->t_embd = cur;
  9817. cur = build_lora_mm(model.output, cur);
  9818. cb(cur, "result_output", -1);
  9819. res->t_logits = cur;
  9820. ggml_build_forward_expand(gf, cur);
  9821. }
  9822. };
  9823. struct llm_build_arwkv7 : public llm_build_rwkv7_base {
  9824. llm_build_arwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) {
  9825. GGML_ASSERT(n_embd == hparams.n_embd_k_s());
  9826. ggml_tensor * cur;
  9827. ggml_tensor * inpL;
  9828. ggml_tensor * v_first = nullptr;
  9829. inpL = build_inp_embd(model.tok_embd);
  9830. ggml_tensor * state_copy = build_inp_s_copy();
  9831. const auto n_embd = hparams.n_embd;
  9832. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9833. const auto n_seqs = ubatch.n_seqs;
  9834. for (int il = 0; il < n_layer; ++il) {
  9835. const llama_layer * layer = &model.layers[il];
  9836. inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
  9837. ggml_tensor * token_shift = build_rwkv_token_shift_load(
  9838. gf, state_copy, ubatch, il
  9839. );
  9840. ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il);
  9841. cb(att_norm, "attn_norm", il);
  9842. ggml_tensor * x_prev = ggml_concat(
  9843. ctx0,
  9844. token_shift,
  9845. ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0),
  9846. 1
  9847. );
  9848. cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il);
  9849. token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm));
  9850. ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
  9851. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9852. cb(ffn_inp, "ffn_inp", il);
  9853. if (il == n_layer - 1) {
  9854. // skip computing output for unused tokens
  9855. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9856. cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
  9857. ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
  9858. }
  9859. // feed-forward network
  9860. cur = build_norm(ffn_inp,
  9861. model.layers[il].ffn_norm, NULL,
  9862. LLM_NORM_RMS, il);
  9863. cb(cur, "ffn_norm", il);
  9864. cur = build_ffn(cur,
  9865. model.layers[il].ffn_up, NULL, NULL,
  9866. model.layers[il].ffn_gate, NULL, NULL,
  9867. model.layers[il].ffn_down, NULL, NULL,
  9868. NULL,
  9869. LLM_FFN_SILU, LLM_FFN_PAR, il);
  9870. cb(cur, "ffn_out", il);
  9871. cur = ggml_add(ctx0, cur, ffn_inp);
  9872. cur = build_cvec(cur, il);
  9873. cb(cur, "l_out", il);
  9874. // input for next layer
  9875. inpL = cur;
  9876. }
  9877. cur = inpL;
  9878. cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM_RMS, -1);
  9879. cb(cur, "result_norm", -1);
  9880. res->t_embd = cur;
  9881. cur = build_lora_mm(model.output, cur);
  9882. cb(cur, "result_output", -1);
  9883. res->t_logits = cur;
  9884. ggml_build_forward_expand(gf, cur);
  9885. }
  9886. };
  9887. struct llm_build_granite : public llm_graph_context {
  9888. llm_build_granite(
  9889. const llama_model & model,
  9890. const llm_graph_params & params,
  9891. ggml_cgraph * gf,
  9892. const bool use_rope = true)
  9893. : llm_graph_context(params) {
  9894. const int64_t n_embd_head = hparams.n_embd_head_v;
  9895. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  9896. GGML_ASSERT(n_embd_head == hparams.n_rot);
  9897. ggml_tensor * cur;
  9898. ggml_tensor * inpL;
  9899. inpL = build_inp_embd(model.tok_embd);
  9900. // inp_pos - built only if rope enabled
  9901. ggml_tensor * inp_pos = nullptr;
  9902. if (use_rope) {
  9903. inp_pos = build_inp_pos();
  9904. }
  9905. auto * inp_attn = build_attn_inp_kv_unified();
  9906. const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
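// Granite provides explicit attention/residual/logit scales in its hparams; when
// f_attention_scale is unset (0.0f) the usual 1/sqrt(n_embd_head) is used instead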
  9907. for (int il = 0; il < n_layer; ++il) {
  9908. ggml_tensor * inpSA = inpL;
  9909. // norm
  9910. cur = build_norm(inpL,
  9911. model.layers[il].attn_norm, NULL,
  9912. LLM_NORM_RMS, il);
  9913. cb(cur, "attn_norm", il);
  9914. // self-attention
  9915. {
  9916. // compute Q and K and (optionally) RoPE them
  9917. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  9918. cb(Qcur, "Qcur", il);
  9919. if (model.layers[il].bq) {
  9920. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  9921. cb(Qcur, "Qcur", il);
  9922. }
  9923. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  9924. cb(Kcur, "Kcur", il);
  9925. if (model.layers[il].bk) {
  9926. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  9927. cb(Kcur, "Kcur", il);
  9928. }
  9929. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  9930. cb(Vcur, "Vcur", il);
  9931. if (model.layers[il].bv) {
  9932. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  9933. cb(Vcur, "Vcur", il);
  9934. }
  9935. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  9936. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  9937. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  9938. if (use_rope) {
  9939. ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
  9940. Qcur = ggml_rope_ext(
  9941. ctx0, Qcur, inp_pos, rope_factors,
  9942. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9943. ext_factor, attn_factor, beta_fast, beta_slow
  9944. );
  9945. Kcur = ggml_rope_ext(
  9946. ctx0, Kcur, inp_pos, rope_factors,
  9947. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9948. ext_factor, attn_factor, beta_fast, beta_slow
  9949. );
  9950. }
  9951. cb(Qcur, "Qcur", il);
  9952. cb(Kcur, "Kcur", il);
  9953. cb(Vcur, "Vcur", il);
  9954. cur = build_attn(inp_attn, gf,
  9955. model.layers[il].wo, model.layers[il].bo,
  9956. Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
  9957. cb(cur, "attn_out", il);
  9958. }
  9959. if (il == n_layer - 1) {
  9960. // skip computing output for unused tokens
  9961. ggml_tensor * inp_out_ids = build_inp_out_ids();
  9962. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  9963. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  9964. }
  9965. // For Granite architectures - scale residual
  9966. cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
  9967. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  9968. cb(ffn_inp, "ffn_inp", il);
  9969. // feed-forward network (non-MoE)
  9970. if (model.layers[il].ffn_gate_inp == nullptr) {
  9971. cur = build_norm(ffn_inp,
  9972. model.layers[il].ffn_norm, NULL,
  9973. LLM_NORM_RMS, il);
  9974. cb(cur, "ffn_norm", il);
  9975. cur = build_ffn(cur,
  9976. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  9977. model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
  9978. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  9979. NULL,
  9980. LLM_FFN_SILU, LLM_FFN_PAR, il);
  9981. cb(cur, "ffn_out", il);
  9982. } else {
  9983. // MoE branch
  9984. cur = build_norm(ffn_inp,
  9985. model.layers[il].ffn_norm, NULL,
  9986. LLM_NORM_RMS, il);
  9987. cb(cur, "ffn_norm", il);
  9988. ggml_tensor * moe_out = build_moe_ffn(cur,
  9989. model.layers[il].ffn_gate_inp,
  9990. model.layers[il].ffn_up_exps,
  9991. model.layers[il].ffn_gate_exps,
  9992. model.layers[il].ffn_down_exps,
  9993. nullptr,
  9994. n_expert, n_expert_used,
  9995. LLM_FFN_SILU, true,
  9996. false, 0.0,
  9997. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  9998. il);
  9999. cb(moe_out, "ffn_moe_out", il);
  10000. // For Granite MoE Shared
  10001. if (hparams.n_ff_shexp > 0) {
  10002. ggml_tensor * ffn_shexp = build_ffn(cur,
  10003. model.layers[il].ffn_up_shexp, NULL, NULL,
  10004. model.layers[il].ffn_gate_shexp, NULL, NULL,
  10005. model.layers[il].ffn_down_shexp, NULL, NULL,
  10006. NULL,
  10007. LLM_FFN_SILU, LLM_FFN_PAR, il);
  10008. cb(ffn_shexp, "ffn_shexp", il);
  10009. cur = ggml_add(ctx0, moe_out, ffn_shexp);
  10010. cb(cur, "ffn_out", il);
  10011. } else {
  10012. cur = moe_out;
  10013. }
  10014. }
  10015. // For Granite architectures - scale residual
  10016. cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
  10017. cur = ggml_add(ctx0, cur, ffn_inp);
  10018. cb(cur, "ffn_out", il);
  10019. cur = build_cvec(cur, il);
  10020. cb(cur, "l_out", il);
  10021. // input for next layer
  10022. inpL = cur;
  10023. }
  10024. cur = inpL;
  10025. cur = build_norm(cur,
  10026. model.output_norm, NULL,
  10027. LLM_NORM_RMS, -1);
  10028. cb(cur, "result_norm", -1);
  10029. res->t_embd = cur;
  10030. // lm_head
  10031. cur = build_lora_mm(model.output, cur);
  10032. // For Granite architectures - scale logits
  10033. cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
  10034. cb(cur, "result_output", -1);
  10035. res->t_logits = cur;
  10036. ggml_build_forward_expand(gf, cur);
  10037. }
  10038. };
  10039. // ref: https://github.com/facebookresearch/chameleon
  10040. // based on the original build_llama() function, changes:
  10041. // * qk-norm
  10042. // * swin-norm
  10043. // * removed bias
  10044. // * removed MoE
  10045. struct llm_build_chameleon : public llm_graph_context {
  10046. llm_build_chameleon(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  10047. const int64_t n_embd_head = hparams.n_embd_head_v;
  10048. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  10049. GGML_ASSERT(n_embd_head == hparams.n_rot);
  10050. ggml_tensor * cur;
  10051. ggml_tensor * inpL;
  10052. inpL = build_inp_embd(model.tok_embd);
  10053. // inp_pos - contains the positions
  10054. ggml_tensor * inp_pos = build_inp_pos();
  10055. auto * inp_attn = build_attn_inp_kv_unified();
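// with swin_norm enabled the RMS norms are applied after the attention and FFN blocks
// (post-norm) instead of before them; both orderings are handled in the branches below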
  10056. for (int il = 0; il < n_layer; ++il) {
  10057. ggml_tensor * inpSA = inpL;
  10058. // norm
  10059. if (hparams.swin_norm) {
  10060. cur = inpL;
  10061. } else {
  10062. cur = build_norm(inpL,
  10063. model.layers[il].attn_norm, NULL,
  10064. LLM_NORM_RMS, il);
  10065. cb(cur, "attn_norm", il);
  10066. }
  10067. // self-attention
  10068. {
  10069. // compute Q and K and RoPE them
  10070. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  10071. cb(Qcur, "Qcur", il);
  10072. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  10073. cb(Kcur, "Kcur", il);
  10074. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  10075. cb(Vcur, "Vcur", il);
  10076. if (model.layers[il].attn_q_norm) {
  10077. Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens,
  10078. ggml_element_size(Qcur) * n_embd_head,
  10079. ggml_element_size(Qcur) * n_embd_head * n_head,
  10080. 0);
  10081. cb(Qcur, "Qcur", il);
  10082. Qcur = build_norm(Qcur,
  10083. model.layers[il].attn_q_norm,
  10084. model.layers[il].attn_q_norm_b,
  10085. LLM_NORM, il);
  10086. cb(Qcur, "Qcur", il);
  10087. }
  10088. if (model.layers[il].attn_k_norm) {
  10089. Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens,
  10090. ggml_element_size(Kcur) * n_embd_head,
  10091. ggml_element_size(Kcur) * n_embd_head * n_head_kv,
  10092. 0);
  10093. cb(Kcur, "Kcur", il);
  10094. Kcur = build_norm(Kcur,
  10095. model.layers[il].attn_k_norm,
  10096. model.layers[il].attn_k_norm_b,
  10097. LLM_NORM, il);
  10098. cb(Kcur, "Kcur", il);
  10099. }
  10100. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  10101. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  10102. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  10103. Qcur = ggml_rope_ext(
  10104. ctx0, Qcur, inp_pos, nullptr,
  10105. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10106. ext_factor, attn_factor, beta_fast, beta_slow
  10107. );
  10108. Kcur = ggml_rope_ext(
  10109. ctx0, Kcur, inp_pos, nullptr,
  10110. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10111. ext_factor, attn_factor, beta_fast, beta_slow
  10112. );
  10113. cb(Qcur, "Qcur", il);
  10114. cb(Kcur, "Kcur", il);
  10115. cb(Vcur, "Vcur", il);
  10116. cur = build_attn(inp_attn, gf,
  10117. model.layers[il].wo, nullptr,
  10118. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  10119. if (hparams.swin_norm) {
  10120. cur = build_norm(cur,
  10121. model.layers[il].attn_norm, NULL,
  10122. LLM_NORM_RMS, il);
  10123. }
  10124. }
  10125. if (il == n_layer - 1) {
  10126. // skip computing output for unused tokens
  10127. ggml_tensor * inp_out_ids = build_inp_out_ids();
  10128. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  10129. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  10130. }
  10131. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  10132. cb(ffn_inp, "ffn_inp", il);
  10133. // feed-forward network
  10134. if (!hparams.swin_norm) {
  10135. cur = build_norm(ffn_inp,
  10136. model.layers[il].ffn_norm, NULL,
  10137. LLM_NORM_RMS, il);
  10138. cb(cur, "ffn_norm", il);
  10139. }
  10140. cur = build_ffn(cur,
  10141. model.layers[il].ffn_up, NULL, NULL,
  10142. model.layers[il].ffn_gate, NULL, NULL,
  10143. model.layers[il].ffn_down, NULL, NULL,
  10144. NULL,
  10145. LLM_FFN_SILU, LLM_FFN_PAR, il);
  10146. cb(cur, "ffn_out", il);
  10147. if (hparams.swin_norm) {
  10148. cur = build_norm(cur,
  10149. model.layers[il].ffn_norm, NULL,
  10150. LLM_NORM_RMS, il);
  10151. cb(cur, "ffn_norm", il);
  10152. }
  10153. cur = ggml_add(ctx0, cur, ffn_inp);
  10154. cb(cur, "ffn_out", il);
  10155. cur = build_cvec(cur, il);
  10156. cb(cur, "l_out", il);
  10157. // input for next layer
  10158. inpL = cur;
  10159. }
  10160. cur = inpL;
  10161. cur = build_norm(cur,
  10162. model.output_norm, NULL,
  10163. LLM_NORM_RMS, -1);
  10164. cb(cur, "result_norm", -1);
  10165. res->t_embd = cur;
  10166. // lm_head
  10167. cur = build_lora_mm(model.output, cur);
  10168. cb(cur, "result_output_with_img_logits", -1);
// TODO: image token logits are suppressed here so that only text tokens can be sampled;
// this should be removed once image outputs are supported
  10171. int img_token_end_idx = 8196;
  10172. int img_token_start_idx = 4;
  10173. int num_img_tokens = img_token_end_idx - img_token_start_idx;
// create a 1d tensor of size num_img_tokens with every value clamped to -FLT_MAX,
// which ensures that text token logits are always larger than image token logits
  10176. ggml_tensor * img_logits = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, num_img_tokens);
  10177. img_logits = ggml_clamp(ctx0, img_logits, -FLT_MAX, -FLT_MAX);
  10178. cb(img_logits, "img_logits", -1);
  10179. cur = ggml_set_1d(ctx0, cur, img_logits, ggml_element_size(cur) * img_token_start_idx);
  10180. cb(cur, "result_output", -1);
  10181. res->t_logits = cur;
  10182. ggml_build_forward_expand(gf, cur);
  10183. }
  10184. };
  10185. struct llm_build_wavtokenizer_dec : public llm_graph_context {
  10186. llm_build_wavtokenizer_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  10187. ggml_tensor * cur;
  10188. ggml_tensor * inpL;
  10189. inpL = build_inp_embd(model.tok_embd);
  10190. cur = ggml_cont(ctx0, ggml_transpose(ctx0, inpL));
  10191. cur = ggml_conv_1d_ph(ctx0, model.conv1d, cur, 1, 1);
  10192. cur = ggml_add(ctx0, cur, model.conv1d_b);
  10193. // posnet
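// the posnet layers are hard-coded by index: 0, 1, 3 and 4 are convolutional residual
// blocks, 2 is a self-attention block and 5 is a final group norm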
  10194. for (uint32_t il = 0; il < hparams.posnet.n_layer; ++il) {
  10195. const auto & layer = model.layers[il].posnet;
  10196. inpL = cur;
  10197. switch (il) {
  10198. case 0:
  10199. case 1:
  10200. case 3:
  10201. case 4:
  10202. {
  10203. cur = build_norm(cur,
  10204. layer.norm1,
  10205. layer.norm1_b,
  10206. LLM_NORM_GROUP, 0);
  10207. cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur);
  10208. cur = ggml_conv_1d_ph(ctx0, layer.conv1, cur, 1, 1);
  10209. cur = ggml_add(ctx0, cur, layer.conv1_b);
  10210. cur = build_norm(cur,
  10211. layer.norm2,
  10212. layer.norm2_b,
  10213. LLM_NORM_GROUP, 0);
  10214. cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur);
  10215. cur = ggml_conv_1d_ph(ctx0, layer.conv2, cur, 1, 1);
  10216. cur = ggml_add(ctx0, cur, layer.conv2_b);
  10217. cur = ggml_add(ctx0, cur, inpL);
  10218. } break;
  10219. case 2:
  10220. {
  10221. cur = build_norm(cur,
  10222. layer.attn_norm,
  10223. layer.attn_norm_b,
  10224. LLM_NORM_GROUP, 0);
  10225. ggml_tensor * q;
  10226. ggml_tensor * k;
  10227. ggml_tensor * v;
  10228. q = ggml_conv_1d_ph(ctx0, layer.attn_q, cur, 1, 1);
  10229. k = ggml_conv_1d_ph(ctx0, layer.attn_k, cur, 1, 1);
  10230. v = ggml_conv_1d_ph(ctx0, layer.attn_v, cur, 1, 1);
  10231. q = ggml_add(ctx0, q, layer.attn_q_b);
  10232. k = ggml_add(ctx0, k, layer.attn_k_b);
  10233. v = ggml_add(ctx0, v, layer.attn_v_b);
  10234. q = ggml_cont(ctx0, ggml_transpose(ctx0, q));
  10235. k = ggml_cont(ctx0, ggml_transpose(ctx0, k));
  10236. ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
  10237. kq = ggml_soft_max_ext(ctx0, kq, nullptr, 1.0f/sqrtf(float(hparams.posnet.n_embd)), 0.0f);
  10238. cur = ggml_mul_mat(ctx0, kq, v);
  10239. cur = ggml_conv_1d_ph(ctx0, layer.attn_o, cur, 1, 1);
  10240. cur = ggml_add(ctx0, cur, layer.attn_o_b);
  10241. cur = ggml_add(ctx0, cur, inpL);
  10242. } break;
  10243. case 5:
  10244. {
  10245. cur = build_norm(cur,
  10246. layer.norm,
  10247. layer.norm_b,
  10248. LLM_NORM_GROUP, 0);
  10249. } break;
  10250. default: GGML_ABORT("unknown posnet layer");
  10251. };
  10252. }
  10253. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  10254. cur = build_norm(cur,
  10255. model.tok_norm,
  10256. model.tok_norm_b,
  10257. LLM_NORM, -1);
  10258. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  10259. inpL = cur;
  10260. // convnext
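// each convnext block: depthwise conv -> norm -> pointwise FFN (GELU) -> per-channel
// gamma scale -> residual connection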
  10261. for (uint32_t il = 0; il < hparams.convnext.n_layer; ++il) {
  10262. const auto & layer = model.layers[il].convnext;
  10263. cur = inpL;
  10264. cur = ggml_conv_1d_dw_ph(ctx0, layer.dw, cur, 1, 1);
  10265. cur = ggml_add(ctx0, cur, layer.dw_b);
  10266. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  10267. cur = build_norm(cur,
  10268. layer.norm,
  10269. layer.norm_b,
  10270. LLM_NORM, -1);
  10271. cur = build_ffn(cur,
  10272. layer.pw1, layer.pw1_b, NULL,
  10273. NULL, NULL, NULL,
  10274. layer.pw2, layer.pw2_b, NULL,
  10275. NULL,
  10276. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  10277. cur = ggml_mul(ctx0, cur, layer.gamma);
  10278. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  10279. inpL = ggml_add(ctx0, cur, inpL);
  10280. }
  10281. cur = inpL;
  10282. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  10283. cur = build_norm(cur,
  10284. model.output_norm,
  10285. model.output_norm_b,
  10286. LLM_NORM, -1);
  10287. // lm_head
  10288. cur = build_lora_mm(model.output, cur);
  10289. cur = ggml_add(ctx0, cur, model.output_b);
  10290. cb(cur, "result_embd", -1);
  10291. res->t_embd = cur;
  10292. ggml_build_forward_expand(gf, cur);
  10293. }
  10294. };
  10295. struct llm_build_plm : public llm_graph_context {
  10296. llm_build_plm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  10297. const float kq_scale = 1.0f/sqrtf(float(hparams.n_embd_head_k));
  10298. const uint32_t n_embd_head_qk_rope = hparams.n_rot;
  10299. const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  10300. const uint32_t kv_lora_rank = hparams.n_lora_kv;
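// PLM uses a DeepSeek2-style multi-head latent attention layout: queries are split into a
// non-rotary ("nope") part and a rotary ("rope") part, and K/V are reconstructed from a
// low-rank latent of width kv_lora_rank (see wkv_a_mqa / wkv_b below)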
  10301. ggml_tensor * cur;
  10302. ggml_tensor * inpL;
  10303. // {n_embd, n_tokens}
  10304. inpL = build_inp_embd(model.tok_embd);
  10305. // inp_pos - contains the positions
  10306. ggml_tensor * inp_pos = build_inp_pos();
  10307. auto * inp_attn = build_attn_inp_kv_unified();
  10308. for (int il = 0; il < n_layer; ++il) {
  10309. ggml_tensor * inpSA = inpL;
  10310. // norm
  10311. cur = build_norm(inpL,
  10312. model.layers[il].attn_norm, NULL,
  10313. LLM_NORM_RMS, il);
  10314. cb(cur, "attn_norm", il);
// self-attention
  10316. {
  10317. ggml_tensor * q = NULL;
  10318. q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  10319. cb(q, "q", il);
  10320. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  10321. ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
  10322. ggml_row_size(q->type, hparams.n_embd_head_k),
  10323. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  10324. 0);
  10325. cb(q_nope, "q_nope", il);
  10326. // and {n_head * n_embd_head_qk_rope, n_tokens}
  10327. ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
  10328. ggml_row_size(q->type, hparams.n_embd_head_k),
  10329. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  10330. ggml_row_size(q->type, n_embd_head_qk_nope));
  10331. cb(q_pe, "q_pe", il);
  10332. // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
ggml_tensor * kv_pe_compressed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
cb(kv_pe_compressed, "kv_pe_compressed", il);
// split into {kv_lora_rank, n_tokens}
ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compressed, kv_lora_rank, n_tokens,
kv_pe_compressed->nb[1],
0);
cb(kv_compressed, "kv_compressed", il);
// and {n_embd_head_qk_rope, n_tokens}
ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compressed, n_embd_head_qk_rope, 1, n_tokens,
kv_pe_compressed->nb[1],
kv_pe_compressed->nb[1],
ggml_row_size(kv_pe_compressed->type, kv_lora_rank));
  10345. cb(k_pe, "k_pe", il);
  10346. kv_compressed = build_norm(kv_compressed,
  10347. model.layers[il].attn_kv_a_norm, NULL,
  10348. LLM_NORM_RMS, il);
  10349. cb(kv_compressed, "kv_compressed", il);
  10350. // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
  10351. ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
  10352. cb(kv, "kv", il);
  10353. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  10354. ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
  10355. ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
  10356. ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  10357. 0);
  10358. cb(k_nope, "k_nope", il);
  10359. // and {n_head * n_embd_head_v, n_tokens}
  10360. ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
  10361. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  10362. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
  10363. ggml_row_size(kv->type, (n_embd_head_qk_nope)));
  10364. cb(v_states, "v_states", il);
  10365. v_states = ggml_cont(ctx0, v_states);
  10366. cb(v_states, "v_states", il);
  10367. v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
  10368. ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
  10369. 0);
  10370. cb(v_states, "v_states", il);
  10371. q_pe = ggml_rope_ext(
  10372. ctx0, q_pe, inp_pos, nullptr,
  10373. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10374. ext_factor, attn_factor, beta_fast, beta_slow
  10375. );
  10376. cb(q_pe, "q_pe", il);
  10377. // shared RoPE key
  10378. k_pe = ggml_rope_ext(
  10379. ctx0, k_pe, inp_pos, nullptr,
  10380. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10381. ext_factor, attn_factor, beta_fast, beta_slow
  10382. );
  10383. cb(k_pe, "k_pe", il);
  10384. ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
  10385. cb(q_states, "q_states", il);
  10386. ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
  10387. cb(k_states, "k_states", il);
  10388. cur = build_attn(inp_attn, gf,
  10389. model.layers[il].wo, NULL,
  10390. q_states, k_states, v_states, nullptr, nullptr, kq_scale, il);
  10391. }
  10392. if (il == n_layer - 1) {
  10393. // skip computing output for unused tokens
  10394. ggml_tensor * inp_out_ids = build_inp_out_ids();
  10395. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  10396. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  10397. }
  10398. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  10399. cb(ffn_inp, "ffn_inp", il);
  10400. cur = build_norm(ffn_inp,
  10401. model.layers[il].ffn_norm, NULL,
  10402. LLM_NORM_RMS, il);
  10403. cb(cur, "ffn_norm", il);
  10404. cur = build_ffn(cur,
  10405. model.layers[il].ffn_up, NULL, NULL,
  10406. NULL, NULL, NULL,
  10407. model.layers[il].ffn_down, NULL, NULL,
  10408. NULL,
  10409. LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il);
  10410. cb(cur, "ffn_out", il);
  10411. cur = ggml_add(ctx0, cur, ffn_inp);
  10412. cur = build_cvec(cur, il);
  10413. cb(cur, "l_out", il);
  10414. // input for next layer
  10415. inpL = cur;
  10416. }
  10417. cur = inpL;
  10418. cur = build_norm(cur,
  10419. model.output_norm, NULL,
  10420. LLM_NORM_RMS, -1);
  10421. cb(cur, "result_norm", -1);
  10422. res->t_embd = cur;
  10423. cur = build_lora_mm(model.output, cur);
  10424. cb(cur, "result_output", -1);
  10425. res->t_logits = cur;
  10426. ggml_build_forward_expand(gf, cur);
  10427. }
  10428. };
  10429. struct llm_build_bailingmoe : public llm_graph_context {
  10430. llm_build_bailingmoe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  10431. ggml_tensor * cur;
  10432. ggml_tensor * inpL;
  10433. inpL = build_inp_embd(model.tok_embd);
  10434. // inp_pos - contains the positions
  10435. ggml_tensor * inp_pos = build_inp_pos();
  10436. auto * inp_attn = build_attn_inp_kv_unified();
  10437. for (int il = 0; il < n_layer; ++il) {
  10438. ggml_tensor * inpSA = inpL;
  10439. // norm
  10440. cur = build_norm(inpL,
  10441. model.layers[il].attn_norm, NULL,
  10442. LLM_NORM_RMS, il);
  10443. cb(cur, "attn_norm", il);
  10444. // self-attention
  10445. {
// rope freq factors; get_rope_factors may return nullptr for models without them
  10447. ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
  10448. // compute Q and K and RoPE them
  10449. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  10450. cb(Qcur, "Qcur", il);
  10451. if (model.layers[il].bq) {
  10452. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  10453. cb(Qcur, "Qcur", il);
  10454. }
  10455. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  10456. cb(Kcur, "Kcur", il);
  10457. if (model.layers[il].bk) {
  10458. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  10459. cb(Kcur, "Kcur", il);
  10460. }
  10461. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  10462. cb(Vcur, "Vcur", il);
  10463. if (model.layers[il].bv) {
  10464. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  10465. cb(Vcur, "Vcur", il);
  10466. }
  10467. Qcur = ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens);
  10468. Kcur = ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens);
  10469. Vcur = ggml_reshape_3d(ctx0, Vcur, n_rot, n_head_kv, n_tokens);
  10470. Qcur = ggml_rope_ext(
  10471. ctx0, Qcur, inp_pos, rope_factors,
  10472. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10473. ext_factor, attn_factor, beta_fast, beta_slow
  10474. );
  10475. Kcur = ggml_rope_ext(
  10476. ctx0, Kcur, inp_pos, rope_factors,
  10477. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10478. ext_factor, attn_factor, beta_fast, beta_slow
  10479. );
  10480. cb(Qcur, "Qcur", il);
  10481. cb(Kcur, "Kcur", il);
  10482. cb(Vcur, "Vcur", il);
  10483. cur = build_attn(inp_attn, gf,
  10484. model.layers[il].wo, model.layers[il].bo,
  10485. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_rot)), il);
  10486. }
  10487. if (il == n_layer - 1) {
  10488. // skip computing output for unused tokens
  10489. ggml_tensor * inp_out_ids = build_inp_out_ids();
  10490. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  10491. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  10492. }
  10493. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  10494. cb(ffn_inp, "ffn_inp", il);
  10495. cur = build_norm(ffn_inp,
  10496. model.layers[il].ffn_norm, NULL,
  10497. LLM_NORM_RMS, il);
  10498. cb(cur, "ffn_norm", il);
  10499. ggml_tensor * moe_out =
  10500. build_moe_ffn(cur,
  10501. model.layers[il].ffn_gate_inp,
  10502. model.layers[il].ffn_up_exps,
  10503. model.layers[il].ffn_gate_exps,
  10504. model.layers[il].ffn_down_exps,
  10505. nullptr,
  10506. n_expert, n_expert_used,
  10507. LLM_FFN_SILU, hparams.expert_weights_norm,
  10508. false, hparams.expert_weights_scale,
  10509. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  10510. il);
  10511. cb(moe_out, "ffn_moe_out", il);
  10512. // FFN shared expert
  10513. {
  10514. ggml_tensor * ffn_shexp = build_ffn(cur,
  10515. model.layers[il].ffn_up_shexp, NULL, NULL,
  10516. model.layers[il].ffn_gate_shexp, NULL, NULL,
  10517. model.layers[il].ffn_down_shexp, NULL, NULL,
  10518. NULL,
  10519. LLM_FFN_SILU, LLM_FFN_PAR, il);
  10520. cb(ffn_shexp, "ffn_shexp", il);
  10521. cur = ggml_add(ctx0, moe_out, ffn_shexp);
  10522. cb(cur, "ffn_out", il);
  10523. }
  10524. cur = ggml_add(ctx0, cur, ffn_inp);
  10525. cur = build_cvec(cur, il);
  10526. cb(cur, "l_out", il);
  10527. // input for next layer
  10528. inpL = cur;
  10529. }
  10530. cur = inpL;
  10531. cur = build_norm(cur,
  10532. model.output_norm, NULL,
  10533. LLM_NORM_RMS, -1);
  10534. cb(cur, "result_norm", -1);
  10535. res->t_embd = cur;
  10536. // lm_head
  10537. cur = build_lora_mm(model.output, cur);
  10538. cb(cur, "result_output", -1);
  10539. res->t_logits = cur;
  10540. ggml_build_forward_expand(gf, cur);
  10541. }
  10542. };
  10543. struct llm_build_dots1 : public llm_graph_context {
  10544. llm_build_dots1(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  10545. const int64_t n_embd_head = hparams.n_embd_head_v;
  10546. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  10547. GGML_ASSERT(n_embd_head == hparams.n_rot);
  10548. ggml_tensor * cur;
  10549. ggml_tensor * inpL;
  10550. inpL = build_inp_embd(model.tok_embd);
  10551. // inp_pos - contains the positions
  10552. ggml_tensor * inp_pos = build_inp_pos();
  10553. auto * inp_attn = build_attn_inp_kv_unified();
  10554. for (int il = 0; il < n_layer; ++il) {
  10555. ggml_tensor * inpSA = inpL;
  10556. // norm
  10557. cur = build_norm(inpL,
  10558. model.layers[il].attn_norm, NULL,
  10559. LLM_NORM_RMS, il);
  10560. cb(cur, "attn_norm", il);
// self-attention
  10562. {
  10563. // compute Q and K and RoPE them
  10564. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  10565. cb(Qcur, "Qcur", il);
  10566. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  10567. cb(Kcur, "Kcur", il);
  10568. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  10569. cb(Vcur, "Vcur", il);
  10570. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  10571. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  10572. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  10573. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
  10574. cb(Qcur, "Qcur_normed", il);
  10575. Qcur = ggml_rope_ext(
  10576. ctx0, Qcur, inp_pos, nullptr,
  10577. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10578. ext_factor, attn_factor, beta_fast, beta_slow
  10579. );
  10580. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
  10581. cb(Kcur, "Kcur_normed", il);
  10582. Kcur = ggml_rope_ext(
  10583. ctx0, Kcur, inp_pos, nullptr,
  10584. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10585. ext_factor, attn_factor, beta_fast, beta_slow
  10586. );
  10587. cb(Qcur, "Qcur", il);
  10588. cb(Kcur, "Kcur", il);
  10589. cb(Vcur, "Vcur", il);
  10590. cur = build_attn(inp_attn, gf,
  10591. model.layers[il].wo, model.layers[il].bo,
  10592. Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  10593. }
  10594. if (il == n_layer - 1) {
  10595. // skip computing output for unused tokens
  10596. ggml_tensor * inp_out_ids = build_inp_out_ids();
  10597. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  10598. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  10599. }
  10600. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  10601. cb(ffn_inp, "ffn_inp", il);
  10602. // MoE branch
  10603. cur = build_norm(ffn_inp,
  10604. model.layers[il].ffn_norm, NULL,
  10605. LLM_NORM_RMS, il);
  10606. cb(cur, "ffn_norm", il);
  10607. if ((uint32_t) il < hparams.n_layer_dense_lead) {
  10608. cur = build_ffn(cur,
  10609. model.layers[il].ffn_up, NULL, NULL,
  10610. model.layers[il].ffn_gate, NULL, NULL,
  10611. model.layers[il].ffn_down, NULL, NULL,
  10612. NULL,
  10613. LLM_FFN_SILU, LLM_FFN_PAR, il);
  10614. cb(cur, "ffn_out", il);
  10615. } else {
  10616. ggml_tensor * moe_out =
  10617. build_moe_ffn(cur,
  10618. model.layers[il].ffn_gate_inp,
  10619. model.layers[il].ffn_up_exps,
  10620. model.layers[il].ffn_gate_exps,
  10621. model.layers[il].ffn_down_exps,
  10622. model.layers[il].ffn_exp_probs_b,
  10623. n_expert, n_expert_used,
  10624. LLM_FFN_SILU, hparams.expert_weights_norm,
  10625. true, hparams.expert_weights_scale,
  10626. (llama_expert_gating_func_type) hparams.expert_gating_func,
  10627. il);
  10628. cb(moe_out, "ffn_moe_out", il);
  10629. {
  10630. ggml_tensor * ffn_shexp = build_ffn(cur,
  10631. model.layers[il].ffn_up_shexp, NULL, NULL,
  10632. model.layers[il].ffn_gate_shexp, NULL, NULL,
  10633. model.layers[il].ffn_down_shexp, NULL, NULL,
  10634. NULL,
  10635. LLM_FFN_SILU, LLM_FFN_PAR, il);
  10636. cb(ffn_shexp, "ffn_shexp", il);
  10637. cur = ggml_add(ctx0, moe_out, ffn_shexp);
  10638. cb(cur, "ffn_out", il);
  10639. }
  10640. }
  10641. cur = ggml_add(ctx0, cur, ffn_inp);
  10642. cur = build_cvec(cur, il);
  10643. cb(cur, "l_out", il);
  10644. // input for next layer
  10645. inpL = cur;
  10646. }
  10647. cur = inpL;
  10648. cur = build_norm(cur,
  10649. model.output_norm, NULL,
  10650. LLM_NORM_RMS, -1);
  10651. cb(cur, "result_norm", -1);
  10652. res->t_embd = cur;
  10653. // lm_head
  10654. cur = build_lora_mm(model.output, cur);
  10655. cb(cur, "result_output", -1);
  10656. res->t_logits = cur;
  10657. ggml_build_forward_expand(gf, cur);
  10658. }
  10659. };
  10660. llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const {
  10661. llama_memory_i * res;
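// memory selection by architecture: the encoder-style BERT variants and the wavtokenizer
// decoder need no KV cache, recurrent architectures (Mamba / RWKV families) use the
// recurrent state cache, and everything else gets a unified KV cache (the iSWA variant
// when sliding-window attention is enabled)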
  10662. switch (arch) {
  10663. case LLM_ARCH_BERT:
  10664. case LLM_ARCH_JINA_BERT_V2:
  10665. case LLM_ARCH_NOMIC_BERT:
  10666. case LLM_ARCH_NOMIC_BERT_MOE:
  10667. case LLM_ARCH_WAVTOKENIZER_DEC:
  10668. {
  10669. res = nullptr;
  10670. } break;
  10671. case LLM_ARCH_MAMBA:
  10672. case LLM_ARCH_RWKV6:
  10673. case LLM_ARCH_RWKV6QWEN2:
  10674. case LLM_ARCH_RWKV7:
  10675. case LLM_ARCH_ARWKV7:
  10676. {
  10677. res = new llama_kv_cache_recurrent(
  10678. *this,
  10679. GGML_TYPE_F32,
  10680. GGML_TYPE_F32,
  10681. cparams.offload_kqv,
  10682. std::max((uint32_t) 1, cparams.n_seq_max),
  10683. cparams.n_seq_max);
  10684. } break;
  10685. default:
  10686. {
  10687. const auto padding = llama_kv_cache_unified::get_padding(cparams);
  10688. cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding);
  10689. LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx);
  10690. if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
  10691. GGML_ASSERT(hparams.is_swa_any());
  10692. res = new llama_kv_cache_unified_iswa(
  10693. *this,
  10694. params.type_k,
  10695. params.type_v,
  10696. !cparams.flash_attn,
  10697. cparams.offload_kqv,
  10698. params.swa_full,
  10699. cparams.n_ctx,
  10700. cparams.n_seq_max,
  10701. cparams.n_ubatch,
  10702. padding);
  10703. } else {
  10704. GGML_ASSERT(!hparams.is_swa_any());
  10705. res = new llama_kv_cache_unified(
  10706. *this,
  10707. nullptr,
  10708. params.type_k,
  10709. params.type_v,
  10710. !cparams.flash_attn,
  10711. cparams.offload_kqv,
  10712. cparams.n_ctx,
  10713. cparams.n_seq_max,
  10714. padding,
  10715. hparams.n_swa,
  10716. hparams.swa_type);
  10717. }
  10718. }
  10719. }
  10720. return res;
  10721. }
llm_graph_result_ptr llama_model::build_graph(
        const llm_graph_params & params,
        ggml_cgraph * gf,
        llm_graph_type type) const {
    std::unique_ptr<llm_graph_context> llm;

    switch (arch) {
        case LLM_ARCH_LLAMA:
            {
                llm = std::make_unique<llm_build_llama>(*this, params, gf);
            } break;
        case LLM_ARCH_LLAMA4:
            {
                llm = std::make_unique<llm_build_llama_iswa>(*this, params, gf);
            } break;
        case LLM_ARCH_DECI:
            {
                llm = std::make_unique<llm_build_deci>(*this, params, gf);
            } break;
        case LLM_ARCH_BAICHUAN:
            {
                llm = std::make_unique<llm_build_baichuan>(*this, params, gf);
            } break;
        case LLM_ARCH_FALCON:
            {
                llm = std::make_unique<llm_build_falcon>(*this, params, gf);
            } break;
        case LLM_ARCH_GROK:
            {
                llm = std::make_unique<llm_build_grok>(*this, params, gf);
            } break;
        case LLM_ARCH_STARCODER:
            {
                llm = std::make_unique<llm_build_starcoder>(*this, params, gf);
            } break;
        case LLM_ARCH_REFACT:
            {
                llm = std::make_unique<llm_build_refact>(*this, params, gf);
            } break;
        case LLM_ARCH_BERT:
        case LLM_ARCH_JINA_BERT_V2:
        case LLM_ARCH_NOMIC_BERT:
        case LLM_ARCH_NOMIC_BERT_MOE:
            {
                llm = std::make_unique<llm_build_bert>(*this, params, gf);
            } break;
        case LLM_ARCH_BLOOM:
            {
                llm = std::make_unique<llm_build_bloom>(*this, params, gf);
            } break;
        case LLM_ARCH_MPT:
            {
                llm = std::make_unique<llm_build_mpt>(*this, params, gf);
            } break;
        case LLM_ARCH_STABLELM:
            {
                llm = std::make_unique<llm_build_stablelm>(*this, params, gf);
            } break;
        case LLM_ARCH_QWEN:
            {
                llm = std::make_unique<llm_build_qwen>(*this, params, gf);
            } break;
        case LLM_ARCH_QWEN2:
            {
                llm = std::make_unique<llm_build_qwen2>(*this, params, gf);
            } break;
        case LLM_ARCH_QWEN2VL:
            {
                llm = std::make_unique<llm_build_qwen2vl>(*this, params, gf);
            } break;
        case LLM_ARCH_QWEN2MOE:
            {
                llm = std::make_unique<llm_build_qwen2moe>(*this, params, gf);
            } break;
        case LLM_ARCH_QWEN3:
            {
                llm = std::make_unique<llm_build_qwen3>(*this, params, gf);
            } break;
        case LLM_ARCH_QWEN3MOE:
            {
                llm = std::make_unique<llm_build_qwen3moe>(*this, params, gf);
            } break;
        case LLM_ARCH_PHI2:
            {
                llm = std::make_unique<llm_build_phi2>(*this, params, gf);
            } break;
        case LLM_ARCH_PHI3:
        case LLM_ARCH_PHIMOE:
            {
                if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
                    llm = std::make_unique<llm_build_phi3<true>> (*this, params, gf);
                } else {
                    llm = std::make_unique<llm_build_phi3<false>>(*this, params, gf);
                }
            } break;
        case LLM_ARCH_PLAMO:
            {
                llm = std::make_unique<llm_build_plamo>(*this, params, gf);
            } break;
        case LLM_ARCH_GPT2:
            {
                llm = std::make_unique<llm_build_gpt2>(*this, params, gf);
            } break;
        case LLM_ARCH_CODESHELL:
            {
                llm = std::make_unique<llm_build_codeshell>(*this, params, gf);
            } break;
        case LLM_ARCH_ORION:
            {
                llm = std::make_unique<llm_build_orion>(*this, params, gf);
            } break;
        case LLM_ARCH_INTERNLM2:
            {
                llm = std::make_unique<llm_build_internlm2>(*this, params, gf);
            } break;
        case LLM_ARCH_MINICPM3:
            {
                llm = std::make_unique<llm_build_minicpm3>(*this, params, gf);
            } break;
        case LLM_ARCH_GEMMA:
            {
                llm = std::make_unique<llm_build_gemma>(*this, params, gf);
            } break;
        case LLM_ARCH_GEMMA2:
            {
                llm = std::make_unique<llm_build_gemma2_iswa>(*this, params, gf);
            } break;
        case LLM_ARCH_GEMMA3:
            {
                llm = std::make_unique<llm_build_gemma3_iswa>(*this, params, gf);
            } break;
        case LLM_ARCH_STARCODER2:
            {
                llm = std::make_unique<llm_build_starcoder2>(*this, params, gf);
            } break;
        case LLM_ARCH_MAMBA:
            {
                llm = std::make_unique<llm_build_mamba>(*this, params, gf);
            } break;
        case LLM_ARCH_XVERSE:
            {
                llm = std::make_unique<llm_build_xverse>(*this, params, gf);
            } break;
        case LLM_ARCH_COMMAND_R:
            {
                llm = std::make_unique<llm_build_command_r>(*this, params, gf);
            } break;
        case LLM_ARCH_COHERE2:
            {
                llm = std::make_unique<llm_build_cohere2_iswa>(*this, params, gf);
            } break;
        case LLM_ARCH_DBRX:
            {
                llm = std::make_unique<llm_build_dbrx>(*this, params, gf);
            } break;
        case LLM_ARCH_OLMO:
            {
                llm = std::make_unique<llm_build_olmo>(*this, params, gf);
            } break;
        case LLM_ARCH_OLMO2:
            {
                llm = std::make_unique<llm_build_olmo2>(*this, params, gf);
            } break;
        case LLM_ARCH_OLMOE:
            {
                llm = std::make_unique<llm_build_olmoe>(*this, params, gf);
            } break;
        case LLM_ARCH_OPENELM:
            {
                llm = std::make_unique<llm_build_openelm>(*this, params, gf);
            } break;
        case LLM_ARCH_GPTNEOX:
            {
                llm = std::make_unique<llm_build_gptneox>(*this, params, gf);
            } break;
        case LLM_ARCH_ARCTIC:
            {
                llm = std::make_unique<llm_build_arctic>(*this, params, gf);
            } break;
        case LLM_ARCH_DEEPSEEK:
            {
                llm = std::make_unique<llm_build_deepseek>(*this, params, gf);
            } break;
        case LLM_ARCH_DEEPSEEK2:
            {
                llm = std::make_unique<llm_build_deepseek2>(*this, params, gf);
            } break;
        case LLM_ARCH_CHATGLM:
            {
                llm = std::make_unique<llm_build_chatglm>(*this, params, gf);
            } break;
        case LLM_ARCH_GLM4:
            {
                llm = std::make_unique<llm_build_glm4>(*this, params, gf);
            } break;
        case LLM_ARCH_BITNET:
            {
                llm = std::make_unique<llm_build_bitnet>(*this, params, gf);
            } break;
        case LLM_ARCH_T5:
            {
                switch (type) {
                    case LLM_GRAPH_TYPE_ENCODER:
                        llm = std::make_unique<llm_build_t5_enc>(*this, params, gf);
                        break;
                    case LLM_GRAPH_TYPE_DEFAULT:
                    case LLM_GRAPH_TYPE_DECODER:
                        llm = std::make_unique<llm_build_t5_dec>(*this, params, gf);
                        break;
                    default:
                        GGML_ABORT("invalid graph type");
                };
            } break;
        case LLM_ARCH_T5ENCODER:
            {
                llm = std::make_unique<llm_build_t5_enc>(*this, params, gf);
            }
            break;
        case LLM_ARCH_JAIS:
            {
                llm = std::make_unique<llm_build_jais>(*this, params, gf);
            } break;
        case LLM_ARCH_NEMOTRON:
            {
                llm = std::make_unique<llm_build_nemotron>(*this, params, gf);
            } break;
        case LLM_ARCH_EXAONE:
            {
                llm = std::make_unique<llm_build_exaone>(*this, params, gf);
            } break;
        case LLM_ARCH_RWKV6:
            {
                llm = std::make_unique<llm_build_rwkv6>(*this, params, gf);
            } break;
        case LLM_ARCH_RWKV6QWEN2:
            {
                llm = std::make_unique<llm_build_rwkv6qwen2>(*this, params, gf);
            } break;
        case LLM_ARCH_RWKV7:
            {
                llm = std::make_unique<llm_build_rwkv7>(*this, params, gf);
            } break;
        case LLM_ARCH_ARWKV7:
            {
                llm = std::make_unique<llm_build_arwkv7>(*this, params, gf);
            } break;
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
        case LLM_ARCH_MINICPM:
            {
                llm = std::make_unique<llm_build_granite>(*this, params, gf);
            } break;
        case LLM_ARCH_CHAMELEON:
            {
                llm = std::make_unique<llm_build_chameleon>(*this, params, gf);
            } break;
        case LLM_ARCH_WAVTOKENIZER_DEC:
            {
                llm = std::make_unique<llm_build_wavtokenizer_dec>(*this, params, gf);
            } break;
        case LLM_ARCH_PLM:
            {
                llm = std::make_unique<llm_build_plm>(*this, params, gf);
            } break;
        case LLM_ARCH_BAILINGMOE:
            {
                llm = std::make_unique<llm_build_bailingmoe>(*this, params, gf);
            } break;
        case LLM_ARCH_DOTS1:
            {
                llm = std::make_unique<llm_build_dots1>(*this, params, gf);
            } break;
        default:
            GGML_ABORT("fatal error");
    }

    // add on pooling layer
    llm->build_pooling(gf, cls, cls_b, cls_out, cls_out_b);

    return std::move(llm->res);
}
//
// interface implementation
//

llama_model_params llama_model_default_params() {
    llama_model_params result = {
        /*.devices                     =*/ nullptr,
        /*.tensor_buft_overrides       =*/ nullptr,
        /*.n_gpu_layers                =*/ 0,
        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
        /*.main_gpu                    =*/ 0,
        /*.tensor_split                =*/ nullptr,
        /*.progress_callback           =*/ nullptr,
        /*.progress_callback_user_data =*/ nullptr,
        /*.kv_overrides                =*/ nullptr,
        /*.vocab_only                  =*/ false,
        /*.use_mmap                    =*/ true,
        /*.use_mlock                   =*/ false,
        /*.check_tensors               =*/ false,
    };

#ifdef GGML_USE_METAL
    // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
    result.n_gpu_layers = 999;
#endif

    return result;
}
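
// illustrative usage (sketch, not part of this file): callers typically start from the
// defaults above and override individual fields before loading a model, e.g.
//
//     llama_model_params mparams = llama_model_default_params();
//     mparams.n_gpu_layers = 32;    // offload 32 layers to the GPU
//     mparams.use_mmap     = false; // read the weights into memory instead of mmap-ing them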
const llama_vocab * llama_model_get_vocab(const llama_model * model) {
    return &model->vocab;
}

void llama_free_model(llama_model * model) {
    llama_model_free(model);
}

void llama_model_free(llama_model * model) {
    delete model;
}

int32_t llama_model_n_ctx_train(const llama_model * model) {
    return model->hparams.n_ctx_train;
}

int32_t llama_model_n_embd(const llama_model * model) {
    return model->hparams.n_embd;
}

int32_t llama_model_n_layer(const llama_model * model) {
    return model->hparams.n_layer;
}

int32_t llama_model_n_head(const llama_model * model) {
    return model->hparams.n_head();
}

int32_t llama_model_n_head_kv(const llama_model * model) {
    return model->hparams.n_head_kv();
}

int32_t llama_model_n_swa(const llama_model * model) {
    return model->hparams.n_swa;
}

uint32_t llama_model_n_cls_out(const struct llama_model * model) {
    return model->hparams.n_cls_out;
}

const char * llama_model_cls_label(const struct llama_model * model, uint32_t i) {
    if (i < model->classifier_labels.size()) {
        return model->classifier_labels[i].c_str();
    }

    return nullptr;
}
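
// illustrative usage (sketch, not part of this file): enumerate the classifier output labels
//
//     for (uint32_t i = 0; i < llama_model_n_cls_out(model); ++i) {
//         const char * label = llama_model_cls_label(model, i);
//         if (label != nullptr) {
//             // use the label
//         }
//     }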
// deprecated
int32_t llama_n_ctx_train(const llama_model * model) {
    return llama_model_n_ctx_train(model);
}

// deprecated
int32_t llama_n_embd(const llama_model * model) {
    return llama_model_n_embd(model);
}

// deprecated
int32_t llama_n_layer(const llama_model * model) {
    return llama_model_n_layer(model);
}

// deprecated
int32_t llama_n_head(const llama_model * model) {
    return llama_model_n_head(model);
}
llama_rope_type llama_model_rope_type(const llama_model * model) {
    switch (model->arch) {
        // these models do not use RoPE
        case LLM_ARCH_GPT2:
        case LLM_ARCH_GPTJ:
        case LLM_ARCH_MPT:
        case LLM_ARCH_REFACT:
        case LLM_ARCH_BLOOM:
        case LLM_ARCH_MAMBA:
        case LLM_ARCH_JINA_BERT_V2:
        case LLM_ARCH_T5:
        case LLM_ARCH_T5ENCODER:
        case LLM_ARCH_JAIS:
        case LLM_ARCH_RWKV6:
        case LLM_ARCH_RWKV6QWEN2:
        case LLM_ARCH_RWKV7:
        case LLM_ARCH_ARWKV7:
        case LLM_ARCH_WAVTOKENIZER_DEC:
            return LLAMA_ROPE_TYPE_NONE;

        // use what we call a normal RoPE, operating on pairs of consecutive head values
        case LLM_ARCH_LLAMA:
        case LLM_ARCH_LLAMA4:
        case LLM_ARCH_DECI:
        case LLM_ARCH_BAICHUAN:
        case LLM_ARCH_STARCODER:
        case LLM_ARCH_INTERNLM2:
        case LLM_ARCH_MINICPM:
        case LLM_ARCH_XVERSE:
        case LLM_ARCH_COMMAND_R:
        case LLM_ARCH_COHERE2:
        case LLM_ARCH_OLMO:
        case LLM_ARCH_ARCTIC:
        case LLM_ARCH_DEEPSEEK:
        case LLM_ARCH_DEEPSEEK2:
        case LLM_ARCH_PLM:
        case LLM_ARCH_CHATGLM:
        case LLM_ARCH_GLM4:
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
        case LLM_ARCH_CHAMELEON:
        case LLM_ARCH_BAILINGMOE:
            return LLAMA_ROPE_TYPE_NORM;

        // the pairs of head values are offset by n_rot/2
        case LLM_ARCH_FALCON:
        case LLM_ARCH_GROK:
        case LLM_ARCH_DBRX:
        case LLM_ARCH_BERT:
        case LLM_ARCH_NOMIC_BERT:
        case LLM_ARCH_NOMIC_BERT_MOE:
        case LLM_ARCH_STABLELM:
        case LLM_ARCH_BITNET:
        case LLM_ARCH_QWEN:
        case LLM_ARCH_QWEN2:
        case LLM_ARCH_QWEN2MOE:
        case LLM_ARCH_QWEN3:
        case LLM_ARCH_QWEN3MOE:
        case LLM_ARCH_OLMO2:
        case LLM_ARCH_OLMOE:
        case LLM_ARCH_PHI2:
        case LLM_ARCH_PHI3:
        case LLM_ARCH_PHIMOE:
        case LLM_ARCH_PLAMO:
        case LLM_ARCH_GEMMA:
        case LLM_ARCH_GEMMA2:
        case LLM_ARCH_GEMMA3:
        case LLM_ARCH_STARCODER2:
        case LLM_ARCH_OPENELM:
        case LLM_ARCH_GPTNEOX:
        case LLM_ARCH_CODESHELL:
        case LLM_ARCH_ORION:
        case LLM_ARCH_NEMOTRON:
        case LLM_ARCH_EXAONE:
        case LLM_ARCH_MINICPM3:
        case LLM_ARCH_DOTS1:
            return LLAMA_ROPE_TYPE_NEOX;

        case LLM_ARCH_QWEN2VL:
            return LLAMA_ROPE_TYPE_MROPE;

        // all model arches should be listed explicitly here
        case LLM_ARCH_UNKNOWN:
            GGML_ABORT("unknown architecture");
    }

    return LLAMA_ROPE_TYPE_NONE;
}
float llama_model_rope_freq_scale_train(const llama_model * model) {
    return model->hparams.rope_freq_scale_train;
}

int32_t llama_model_meta_val_str(const llama_model * model, const char * key, char * buf, size_t buf_size) {
    const auto & it = model->gguf_kv.find(key);
    if (it == model->gguf_kv.end()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }

    return snprintf(buf, buf_size, "%s", it->second.c_str());
}

int32_t llama_model_meta_count(const llama_model * model) {
    return (int)model->gguf_kv.size();
}

int32_t llama_model_meta_key_by_index(const llama_model * model, int i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }

    auto it = model->gguf_kv.begin();
    std::advance(it, i);

    return snprintf(buf, buf_size, "%s", it->first.c_str());
}

int32_t llama_model_meta_val_str_by_index(const llama_model * model, int32_t i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }

    auto it = model->gguf_kv.begin();
    std::advance(it, i);

    return snprintf(buf, buf_size, "%s", it->second.c_str());
}
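
// illustrative usage (sketch, not part of this file): dump all GGUF metadata as strings
//
//     char key[256];
//     char val[2048];
//     for (int32_t i = 0; i < llama_model_meta_count(model); ++i) {
//         llama_model_meta_key_by_index    (model, i, key, sizeof(key));
//         llama_model_meta_val_str_by_index(model, i, val, sizeof(val));
//         // key/val now hold the i-th metadata pair (truncated to the buffer sizes)
//     }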
int32_t llama_model_desc(const llama_model * model, char * buf, size_t buf_size) {
    return snprintf(buf, buf_size, "%s", model->desc().c_str());
}
uint64_t llama_model_size(const llama_model * model) {
    return model->size();
}

const char * llama_model_chat_template(const llama_model * model, const char * name) {
    const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE)
        : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE);
    const auto & it = model->gguf_kv.find(key);
    if (it == model->gguf_kv.end()) {
        // one-off fix for very popular models (so we are not flooded with issues)
        // do not extend this list unless absolutely necessary
        // Mistral-Small-2503 does not have built-in chat template
        llama_vocab_pre_type pre_type = model->vocab.get_pre_type();
        if (pre_type == LLAMA_VOCAB_PRE_TYPE_TEKKEN && model->layers.size() == 40) {
            return "mistral-v7-tekken";
        }

        return nullptr;
    }

    return it->second.c_str();
}
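
// illustrative usage (sketch, not part of this file): look up the default built-in chat template
//
//     const char * tmpl = llama_model_chat_template(model, /*name =*/ nullptr);
//     if (tmpl == nullptr) {
//         // the model ships no template - the caller has to supply one
//     }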
uint64_t llama_model_n_params(const llama_model * model) {
    return model->n_elements();
}

bool llama_model_has_encoder(const llama_model * model) {
    switch (model->arch) {
        case LLM_ARCH_T5:        return true;
        case LLM_ARCH_T5ENCODER: return true;
        default:                 return false;
    }
}

bool llama_model_has_decoder(const llama_model * model) {
    switch (model->arch) {
        case LLM_ARCH_T5ENCODER: return false;
        default:                 return true;
    }
}

llama_token llama_model_decoder_start_token(const llama_model * model) {
    return model->hparams.dec_start_token_id;
}

bool llama_model_is_recurrent(const llama_model * model) {
    switch (model->arch) {
        case LLM_ARCH_MAMBA:      return true;
        case LLM_ARCH_RWKV6:      return true;
        case LLM_ARCH_RWKV6QWEN2: return true;
        case LLM_ARCH_RWKV7:      return true;
        case LLM_ARCH_ARWKV7:     return true;
        default:                  return false;
    }
}

const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
    return model->tensors_by_name;
}