llama-model.cpp 572 KB

#include "llama-model.h"

#include "llama-impl.h"
#include "llama-mmap.h"
#include "llama-batch.h"
#include "llama-cparams.h"
#include "llama-model-loader.h"
#include "llama-kv-cache.h"

#include "ggml-cpp.h"

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cfloat>
#include <cstring>
#include <functional>
#include <map>
#include <regex>
#include <sstream>
#include <stdexcept>
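
// human-readable label for an llm_type value, e.g. LLM_TYPE_7B -> "7B"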
const char * llm_type_name(llm_type type) {
    switch (type) {
        case LLM_TYPE_14M: return "14M";
        case LLM_TYPE_17M: return "17M";
        case LLM_TYPE_22M: return "22M";
        case LLM_TYPE_33M: return "33M";
        case LLM_TYPE_60M: return "60M";
        case LLM_TYPE_70M: return "70M";
        case LLM_TYPE_80M: return "80M";
        case LLM_TYPE_109M: return "109M";
        case LLM_TYPE_137M: return "137M";
        case LLM_TYPE_160M: return "160M";
        case LLM_TYPE_190M: return "190M";
        case LLM_TYPE_220M: return "220M";
        case LLM_TYPE_250M: return "250M";
        case LLM_TYPE_270M: return "270M";
        case LLM_TYPE_335M: return "335M";
        case LLM_TYPE_410M: return "410M";
        case LLM_TYPE_450M: return "450M";
        case LLM_TYPE_770M: return "770M";
        case LLM_TYPE_780M: return "780M";
        case LLM_TYPE_0_5B: return "0.5B";
        case LLM_TYPE_1B: return "1B";
        case LLM_TYPE_1_3B: return "1.3B";
        case LLM_TYPE_1_4B: return "1.4B";
        case LLM_TYPE_1_5B: return "1.5B";
        case LLM_TYPE_1_6B: return "1.6B";
        case LLM_TYPE_1_8B: return "1.8B";
        case LLM_TYPE_2B: return "2B";
        case LLM_TYPE_2_8B: return "2.8B";
        case LLM_TYPE_2_9B: return "2.9B";
        case LLM_TYPE_3B: return "3B";
        case LLM_TYPE_4B: return "4B";
        case LLM_TYPE_6B: return "6B";
        case LLM_TYPE_6_9B: return "6.9B";
        case LLM_TYPE_7B: return "7B";
        case LLM_TYPE_8B: return "8B";
        case LLM_TYPE_9B: return "9B";
        case LLM_TYPE_11B: return "11B";
        case LLM_TYPE_12B: return "12B";
        case LLM_TYPE_13B: return "13B";
        case LLM_TYPE_14B: return "14B";
        case LLM_TYPE_15B: return "15B";
        case LLM_TYPE_16B: return "16B";
        case LLM_TYPE_20B: return "20B";
        case LLM_TYPE_30B: return "30B";
        case LLM_TYPE_32B: return "32B";
        case LLM_TYPE_34B: return "34B";
        case LLM_TYPE_35B: return "35B";
        case LLM_TYPE_40B: return "40B";
        case LLM_TYPE_65B: return "65B";
        case LLM_TYPE_70B: return "70B";
        case LLM_TYPE_236B: return "236B";
        case LLM_TYPE_314B: return "314B";
        case LLM_TYPE_671B: return "671B";
        case LLM_TYPE_SMALL: return "0.1B";
        case LLM_TYPE_MEDIUM: return "0.4B";
        case LLM_TYPE_LARGE: return "0.8B";
        case LLM_TYPE_XL: return "1.5B";
        case LLM_TYPE_A1_7B: return "A1.7B";
        case LLM_TYPE_A2_7B: return "A2.7B";
        case LLM_TYPE_8x7B: return "8x7B";
        case LLM_TYPE_8x22B: return "8x22B";
        case LLM_TYPE_16x12B: return "16x12B";
        case LLM_TYPE_16x3_8B: return "16x3.8B";
        case LLM_TYPE_10B_128x3_66B: return "10B+128x3.66B";
        case LLM_TYPE_57B_A14B: return "57B.A14B";
        case LLM_TYPE_27B: return "27B";
        case LLM_TYPE_290B: return "290B";
        case LLM_TYPE_17B_16E: return "17Bx16E (Scout)";
        case LLM_TYPE_17B_128E: return "17Bx128E (Maverick)";
        default: return "?B";
    }
}
  94. static const char * llama_expert_gating_func_name(llama_expert_gating_func_type type) {
  95. switch (type) {
  96. case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: return "softmax";
  97. case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: return "sigmoid";
  98. default: return "unknown";
  99. }
  100. }
  101. static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
  102. { LLAMA_ROPE_SCALING_TYPE_NONE, "none" },
  103. { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" },
  104. { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" },
  105. { LLAMA_ROPE_SCALING_TYPE_LONGROPE, "longrope" },
  106. };
  107. static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
  108. for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
  109. if (kv.second == name) {
  110. return (llama_rope_scaling_type) kv.first;
  111. }
  112. }
  113. return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
  114. }
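// [editor's note: illustrative sketch, not part of the upstream source] the table and helper
// above turn the scaling-type string read via LLM_KV_ROPE_SCALING_TYPE further down into the
// enum, e.g.:
//
//     llama_rope_scaling_type t = llama_rope_scaling_type_from_string("yarn");
//     // t == LLAMA_ROPE_SCALING_TYPE_YARN; any unrecognized string falls back to
//     // LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED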
  115. // checks if the weight tensor can be used with the specified buffer type and device
  116. static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) {
  117. GGML_ASSERT(w != nullptr);
  118. if (op == GGML_OP_NONE) {
  119. return true;
  120. }
  121. ggml_init_params params = {
  122. /*.mem_size =*/ ggml_tensor_overhead()*8,
  123. /*.mem_buffer =*/ NULL,
  124. /*.no_alloc =*/ true,
  125. };
  126. ggml_context_ptr ctx_ptr { ggml_init(params) };
  127. if (!ctx_ptr) {
  128. throw std::runtime_error(format("failed to create ggml context"));
  129. }
  130. ggml_context * ctx = ctx_ptr.get();
  131. ggml_tensor * op_tensor = nullptr;
  132. switch (op) {
  133. case GGML_OP_GET_ROWS:
  134. {
  135. ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
  136. op_tensor = ggml_get_rows(ctx, w, b);
  137. } break;
  138. case GGML_OP_MUL_MAT:
  139. {
  140. ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], 512, w->ne[2], w->ne[3]);
  141. op_tensor = ggml_mul_mat(ctx, w, b);
  142. } break;
  143. case GGML_OP_MUL_MAT_ID:
  144. {
  145. int n_expert_used = hparams.n_expert_used;
  146. ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512);
  147. ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512);
  148. op_tensor = ggml_mul_mat_id(ctx, w, b, ids);
  149. } break;
  150. case GGML_OP_ADD:
  151. {
  152. ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
  153. op_tensor = ggml_add(ctx, a, w);
  154. } break;
  155. case GGML_OP_MUL:
  156. {
  157. ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
  158. op_tensor = ggml_mul(ctx, a, w);
  159. } break;
  160. case GGML_OP_DIV:
  161. {
  162. ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, w->ne[0]);
  163. op_tensor = ggml_div(ctx, a, w);
  164. } break;
  165. case GGML_OP_ROPE:
  166. {
  167. int n_embd_head = hparams.n_embd_head_v;
  168. int n_head = hparams.n_head();
  169. ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd_head, n_head, 512);
  170. ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
  171. op_tensor = ggml_rope_ext(
  172. ctx, a, b, w,
  173. 0, 0, 0, 0, 0,
  174. 0, 0, 0, 0
  175. );
  176. } break;
  177. case GGML_OP_SSM_CONV:
  178. {
  179. // FIXME
  180. ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 12345, w->ne[1], 6789);
  181. op_tensor = ggml_ssm_conv(ctx, conv_x, w);
  182. } break;
  183. case GGML_OP_SSM_SCAN:
  184. {
  185. // FIXME
  186. const int64_t d_state = w->ne[0];
  187. const int64_t d_inner = w->ne[1];
  188. const int64_t n_seq_tokens = 512;
  189. const int64_t n_seqs = 1;
  190. ggml_tensor * s = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, d_inner, n_seqs);
  191. ggml_tensor * x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs);
  192. ggml_tensor * dt = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs);
  193. ggml_tensor * B = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs);
  194. ggml_tensor * C = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs);
  195. op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C);
  196. } break;
  197. case GGML_OP_RWKV_WKV6:
  198. {
  199. // FIXME
  200. const int64_t S = 123;
  201. const int64_t H = 123;
  202. const int64_t n_tokens = 123;
  203. const int64_t n_seqs = 123;
  204. ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
  205. ggml_tensor * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
  206. ggml_tensor * r = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
  207. ggml_tensor * tf = w;
  208. ggml_tensor * td = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
  209. ggml_tensor * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H);
  210. op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state);
  211. } break;
  212. case GGML_OP_IM2COL:
  213. {
  214. const int n_embd = hparams.n_embd;
  215. ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd, w->ne[1], 1, 1);
  216. op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16);
  217. } break;
  218. default:
  219. GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name);
  220. }
  221. // create a temporary dummy buffer for the weight so that supports_op can check the buffer type
  222. GGML_ASSERT(w->buffer == nullptr);
  223. w->buffer = ggml_backend_buft_alloc_buffer(buft, 0);
  224. bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
  225. ggml_backend_buffer_free(w->buffer);
  226. w->buffer = nullptr;
  227. return op_supported;
  228. }
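// [editor's note: illustrative usage sketch, assuming a populated `hparams` and a weight
// tensor `w` that has no buffer attached yet] a caller probing whether a device could run the
// matmul for this weight from its default buffer type would look roughly like:
//
//     ggml_backend_dev_t dev          = ggml_backend_dev_get(0);
//     ggml_backend_buffer_type_t buft = ggml_backend_dev_buffer_type(dev);
//     bool ok = weight_buft_supported(hparams, w, GGML_OP_MUL_MAT, buft, dev);
//
// the function attaches a zero-sized dummy buffer of that type so that
// ggml_backend_dev_supports_op can inspect the buffer type, then detaches it again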
  229. // lists of buffer types used for each layer
  230. using buft_list_t = std::vector<std::pair<ggml_backend_dev_t, ggml_backend_buffer_type_t>>;
  231. // find the first buffer type in the list that can use the tensor
  232. static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hparams, ggml_tensor * tensor, ggml_op op, const buft_list_t & buft_list) {
  233. GGML_ASSERT(!buft_list.empty());
  234. for (const auto & cur : buft_list) {
  235. ggml_backend_dev_t cur_dev = cur.first;
  236. ggml_backend_buffer_type_t cur_buft = cur.second;
  237. if (weight_buft_supported(hparams, tensor, op, cur_buft, cur_dev)) {
  238. return cur_buft;
  239. }
  240. }
  241. return nullptr;
  242. }
  243. // CPU: ACCEL -> GPU host -> CPU extra -> CPU
  244. static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices) {
  245. buft_list_t buft_list;
  246. // add ACCEL buffer types
  247. for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
  248. ggml_backend_dev_t dev = ggml_backend_dev_get(i);
  249. if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
  250. auto * buft = ggml_backend_dev_buffer_type(dev);
  251. // skip the plain CPU buffer type here; it is appended at the end of the list as the final fallback
  252. if (buft != ggml_backend_cpu_buffer_type()) {
  253. buft_list.emplace_back(dev, buft);
  254. }
  255. }
  256. }
  257. // add a host buffer type
  258. // storing the tensors in a host buffer is useful when the processing of large batches
  259. // is offloaded to a GPU device, since it reduces the time spent on data transfers
  260. // generally, this will be done using the first device in the list
  261. // a better approach would be to handle this on a weight-by-weight basis using the offload_op
  262. // function of the device to determine if it would benefit from being stored in a host buffer
  263. for (auto * dev : devices) {
  264. ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
  265. if (buft) {
  266. buft_list.emplace_back(dev, buft);
  267. break;
  268. }
  269. }
  270. // add extra buffer types, only if no GPU device is present
  271. // ref: https://github.com/ggml-org/llama.cpp/issues/12481#issuecomment-2743136094
  272. auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
  273. auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
  274. auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
  275. ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
  276. if (ggml_backend_dev_get_extra_bufts_fn) {
  277. ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
  278. while (extra_bufts && *extra_bufts) {
  279. buft_list.emplace_back(cpu_dev, *extra_bufts);
  280. ++extra_bufts;
  281. }
  282. }
  283. // add the CPU buffer type
  284. for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
  285. ggml_backend_dev_t dev = ggml_backend_dev_get(i);
  286. if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) {
  287. buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
  288. }
  289. }
  290. return buft_list;
  291. }
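// [editor's note: illustrative] for a typical single-GPU setup the resulting priority list is
//
//     [ (accel_dev, accel_buft) ..., (gpu_dev, host_buft), (cpu_dev, extra_buft) ..., (cpu_dev, cpu_buft) ]
//
// i.e. ACCEL -> GPU host -> CPU extra -> CPU as in the comment above; entries whose backend is
// not available are simply absent, and select_weight_buft() walks the list front to back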
  292. // GPU: split if LLAMA_SPLIT_MODE_ROW -> GPU
  293. static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode split_mode, const float * tensor_split) {
  294. buft_list_t buft_list;
  295. // add the device split buffer type if requested and available
  296. if (split_mode == LLAMA_SPLIT_MODE_ROW) {
  297. ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
  298. auto ggml_backend_split_buffer_type_fn = (ggml_backend_split_buffer_type_t)
  299. ggml_backend_reg_get_proc_address(reg, "ggml_backend_split_buffer_type");
  300. if (ggml_backend_split_buffer_type_fn) {
  301. size_t dev_index = [&]() {
  302. auto * reg = ggml_backend_dev_backend_reg(dev);
  303. for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); ++i) {
  304. if (ggml_backend_reg_dev_get(reg, i) == dev) {
  305. return i;
  306. }
  307. }
  308. throw std::runtime_error(format("device %s not found in its backend reg", ggml_backend_dev_name(dev)));
  309. }();
  310. auto * buft = ggml_backend_split_buffer_type_fn(dev_index, tensor_split);
  311. if (buft != nullptr) {
  312. buft_list.emplace_back(dev, buft);
  313. }
  314. }
  315. }
  316. // add the device default buffer type
  317. buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
  318. return buft_list;
  319. }
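// [editor's note: illustrative] with LLAMA_SPLIT_MODE_ROW across two GPUs and
// tensor_split = {0.6f, 0.4f}, each GPU's list becomes
//
//     [ (dev, split_buft(dev_index, tensor_split)), (dev, default_buft) ]
//
// and degrades to just the default buffer type when the backend does not export
// "ggml_backend_split_buffer_type" or returns a null split buffer type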
  320. struct llama_model::impl {
  321. impl() {}
  322. ~impl() {}
  323. uint64_t n_elements = 0;
  324. size_t n_bytes = 0;
  325. std::string desc_str;
  326. // model memory mapped files
  327. llama_mmaps mappings;
  328. // objects representing data potentially being locked in memory
  329. llama_mlocks mlock_bufs;
  330. llama_mlocks mlock_mmaps;
  331. // contexts where the model tensors metadata is stored
  332. std::vector<ggml_context_ptr> ctxs;
  333. // the model memory buffers for the tensor data
  334. std::vector<ggml_backend_buffer_ptr> bufs;
  335. buft_list_t cpu_buft_list;
  336. std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
  337. struct layer_dev {
  338. ggml_backend_dev_t dev;
  339. buft_list_t * buft_list;
  340. };
  341. layer_dev dev_input = {};
  342. layer_dev dev_output = {};
  343. std::vector<layer_dev> dev_layer;
  344. bool has_tensor_overrides;
  345. };
  346. llama_model::llama_model(const llama_model_params & params) : params(params), pimpl(std::make_unique<impl>()) {
  347. pimpl->has_tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
  348. }
  349. llama_model::~llama_model() {}
  350. void llama_model::load_stats(llama_model_loader & ml) {
  351. pimpl->n_elements = ml.n_elements;
  352. pimpl->n_bytes = ml.n_bytes;
  353. }
  354. void llama_model::load_arch(llama_model_loader & ml) {
  355. arch = ml.get_arch();
  356. if (arch == LLM_ARCH_UNKNOWN) {
  357. throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
  358. }
  359. }
  360. void llama_model::load_hparams(llama_model_loader & ml) {
  361. const gguf_context * ctx = ml.meta.get();
  362. // get metadata as string
  363. for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
  364. gguf_type type = gguf_get_kv_type(ctx, i);
  365. if (type == GGUF_TYPE_ARRAY) {
  366. continue;
  367. }
  368. const char * name = gguf_get_key(ctx, i);
  369. const std::string value = gguf_kv_to_str(ctx, i);
  370. gguf_kv.emplace(name, value);
  371. }
  372. // get general kv
  373. ml.get_key(LLM_KV_GENERAL_NAME, name, false);
  374. // everything past this point is not vocab-related
  375. if (hparams.vocab_only) {
  376. return;
  377. }
  378. ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
  379. ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
  380. ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer);
  381. ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
  382. ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
  383. if (arch == LLM_ARCH_WAVTOKENIZER_DEC) {
  384. ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features);
  385. ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd);
  386. ml.get_key(LLM_KV_POSNET_BLOCK_COUNT, hparams.posnet.n_layer);
  387. ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd);
  388. ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT, hparams.convnext.n_layer);
  389. }
  390. GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
  391. GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
  392. if (hparams.n_expert > 0) {
  393. GGML_ASSERT(hparams.n_expert_used > 0);
  394. } else {
  395. GGML_ASSERT(hparams.n_expert_used == 0);
  396. }
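// [editor's note] e.g. a Mixtral-style MoE model stores n_expert = 8 and n_expert_used = 2
// (top-2 routing), while dense models leave both at 0; the asserts above reject inconsistent
// combinations such as n_expert = 8 with n_expert_used = 0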
  397. // zero-out the array hparams
  398. std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
  399. std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
  400. std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
  401. ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer, false);
  402. ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
  403. // n_head_kv is optional, default to n_head
  404. hparams.n_head_kv_arr = hparams.n_head_arr;
  405. ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false);
  406. bool rope_finetuned = false;
  407. ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
  408. hparams.rope_finetuned = rope_finetuned;
  409. hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
  410. ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
  411. // rope_freq_base (optional)
  412. hparams.rope_freq_base_train = 10000.0f;
  413. ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
  414. std::string rope_scaling("linear");
  415. ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
  416. hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
  417. GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
  418. // rope_freq_scale (inverse of the kv) is optional
  419. float ropescale = 0.0f;
  420. if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
  421. // try the old key name
  422. ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
  423. }
  424. hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
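// [editor's note] e.g. a rope scaling factor of 4.0 in the metadata yields
// rope_freq_scale_train = 1.0f / 4.0f = 0.25f, while a missing or zero factor leaves the
// scale at 1.0f (no context scaling)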
  425. // by default assume that the sliding-window layers use the same scaling type as the non-sliding-window layers
  426. hparams.rope_freq_base_train_swa = hparams.rope_freq_base_train;
  427. hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
  428. ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
  429. // non-transformer models do not have attention heads
  430. if (hparams.n_head() > 0) {
  431. // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
  432. // gpt-j n_rot = rotary_dim
  433. hparams.n_embd_head_k = hparams.n_embd / hparams.n_head();
  434. ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
  435. hparams.n_embd_head_v = hparams.n_embd / hparams.n_head();
  436. ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
  437. // sanity check for n_rot (optional)
  438. hparams.n_rot = hparams.n_embd_head_k;
  439. ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
  440. if (arch == LLM_ARCH_LLAMA || arch == LLM_ARCH_DECI || arch == LLM_ARCH_FALCON) {
  441. if (hparams.n_rot != hparams.n_embd_head_k) {
  442. throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
  443. }
  444. }
  445. } else {
  446. hparams.n_rot = 0;
  447. hparams.n_embd_head_k = 0;
  448. hparams.n_embd_head_v = 0;
  449. }
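// [editor's note] as a concrete example, n_embd = 4096 with n_head() = 32 gives
// n_embd_head_k = n_embd_head_v = n_rot = 128 unless the optional key-length, value-length or
// rope-dimension-count keys above override the defaults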
  450. // for differentiating model types
  451. uint32_t n_vocab = 0;
  452. ml.get_key(LLM_KV_VOCAB_SIZE, n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, n_vocab, false);
  453. // arch-specific KVs
  454. switch (arch) {
  455. case LLM_ARCH_LLAMA:
  456. {
  457. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  458. if (hparams.n_expert == 8) {
  459. switch (hparams.n_layer) {
  460. case 32: type = LLM_TYPE_8x7B; break;
  461. case 56: type = LLM_TYPE_8x22B; break;
  462. default: type = LLM_TYPE_UNKNOWN;
  463. }
  464. } else {
  465. switch (hparams.n_layer) {
  466. case 16: type = LLM_TYPE_1B; break; // Llama 3.2 1B
  467. case 22: type = LLM_TYPE_1B; break;
  468. case 26: type = LLM_TYPE_3B; break;
  469. case 28: type = LLM_TYPE_3B; break; // Llama 3.2 3B
  470. // granite uses a vocab with len 49152
  471. case 32: type = n_vocab == 49152 ? LLM_TYPE_3B : (n_vocab < 40000 ? LLM_TYPE_7B : LLM_TYPE_8B); break;
  472. case 36: type = LLM_TYPE_8B; break; // granite
  473. case 40: type = LLM_TYPE_13B; break;
  474. case 48: type = LLM_TYPE_34B; break;
  475. case 60: type = LLM_TYPE_30B; break;
  476. case 80: type = hparams.n_head() == hparams.n_head_kv() ? LLM_TYPE_65B : LLM_TYPE_70B; break;
  477. default: type = LLM_TYPE_UNKNOWN;
  478. }
  479. }
  480. } break;
  481. case LLM_ARCH_LLAMA4:
  482. {
  483. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  484. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  485. ml.get_key(LLM_KV_INTERLEAVE_MOE_LAYER_STEP, hparams.n_moe_layer_step);
  486. hparams.n_swa_pattern = 4; // pattern: 3 chunked - 1 full
  487. hparams.n_attn_chunk = 8192; // should this be a gguf kv? currently it's the same for Scout and Maverick
  488. hparams.n_swa = 1; // TODO @ngxson : this is added to trigger the SWA branch (we store the chunked attn mask in the SWA tensor), will need to clean this up later
  489. switch (hparams.n_expert) {
  490. case 16: type = LLM_TYPE_17B_16E; break;
  491. case 128: type = LLM_TYPE_17B_128E; break;
  492. default: type = LLM_TYPE_UNKNOWN;
  493. }
  494. if (type == LLM_TYPE_17B_128E) {
  495. hparams.use_kq_norm = false;
  496. }
  497. } break;
  498. case LLM_ARCH_DECI:
  499. {
  500. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  501. switch (hparams.n_layer) {
  502. case 32: type = LLM_TYPE_7B; break;
  503. case 80: type = LLM_TYPE_70B; break;
  504. default: type = LLM_TYPE_UNKNOWN;
  505. }
  506. } break;
  507. case LLM_ARCH_MINICPM:
  508. {
  509. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  510. ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
  511. ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
  512. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  513. switch (hparams.n_layer) {
  514. case 52: type = LLM_TYPE_1B; break;
  515. case 40: type = LLM_TYPE_2B; break;
  516. default: type = LLM_TYPE_UNKNOWN;
  517. }
  518. } break;
  519. case LLM_ARCH_MINICPM3:
  520. {
  521. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  522. ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
  523. ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
  524. switch (hparams.n_layer) {
  525. case 62: type = LLM_TYPE_4B; break;
  526. default: type = LLM_TYPE_UNKNOWN;
  527. }
  528. } break;
  529. case LLM_ARCH_GROK:
  530. {
  531. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  532. switch (hparams.n_layer) {
  533. case 64: type = LLM_TYPE_314B; break;
  534. default: type = LLM_TYPE_UNKNOWN;
  535. }
  536. } break;
  537. case LLM_ARCH_FALCON:
  538. {
  539. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  540. switch (hparams.n_layer) {
  541. case 32: type = LLM_TYPE_7B; break;
  542. case 60: type = LLM_TYPE_40B; break;
  543. default: type = LLM_TYPE_UNKNOWN;
  544. }
  545. } break;
  546. case LLM_ARCH_BAICHUAN:
  547. {
  548. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  549. switch (hparams.n_layer) {
  550. case 32: type = LLM_TYPE_7B; break;
  551. case 40: type = LLM_TYPE_13B; break;
  552. default: type = LLM_TYPE_UNKNOWN;
  553. }
  554. if (type == LLM_TYPE_13B) {
  555. // TODO: become GGUF KV parameter
  556. hparams.f_max_alibi_bias = 8.0f;
  557. }
  558. } break;
  559. case LLM_ARCH_STARCODER:
  560. {
  561. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  562. switch (hparams.n_layer) {
  563. case 24: type = LLM_TYPE_1B; break;
  564. case 36: type = LLM_TYPE_3B; break;
  565. case 42: type = LLM_TYPE_7B; break;
  566. case 40: type = LLM_TYPE_15B; break;
  567. default: type = LLM_TYPE_UNKNOWN;
  568. }
  569. } break;
  570. case LLM_ARCH_REFACT:
  571. {
  572. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  573. switch (hparams.n_layer) {
  574. case 32: type = LLM_TYPE_1B; break;
  575. default: type = LLM_TYPE_UNKNOWN;
  576. }
  577. // TODO: become GGUF KV parameter
  578. hparams.f_max_alibi_bias = 8.0f;
  579. } break;
  580. case LLM_ARCH_BERT:
  581. {
  582. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  583. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  584. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
  585. switch (hparams.n_layer) {
  586. case 3:
  587. type = LLM_TYPE_17M; break; // bge-micro
  588. case 6:
  589. type = LLM_TYPE_22M; break; // MiniLM-L6
  590. case 12:
  591. switch (hparams.n_embd) {
  592. case 384: type = LLM_TYPE_33M; break; // MiniLM-L12, bge-small
  593. case 768: type = LLM_TYPE_109M; break; // bge-base
  594. default: type = LLM_TYPE_UNKNOWN;
  595. } break;
  596. case 24:
  597. type = LLM_TYPE_335M; break; // bge-large
  598. default: type = LLM_TYPE_UNKNOWN;
  599. }
  600. } break;
  601. case LLM_ARCH_JINA_BERT_V2:
  602. {
  603. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  604. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  605. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
  606. hparams.f_max_alibi_bias = 8.0f;
  607. switch (hparams.n_layer) {
  608. case 4: type = LLM_TYPE_33M; break; // jina-embeddings-small
  609. case 12: type = LLM_TYPE_137M; break; // jina-embeddings-base
  610. default: type = LLM_TYPE_UNKNOWN;
  611. }
  612. } break;
  613. case LLM_ARCH_NOMIC_BERT:
  614. {
  615. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  616. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  617. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
  618. if (hparams.n_layer == 12 && hparams.n_embd == 768) {
  619. type = LLM_TYPE_137M;
  620. }
  621. } break;
  622. case LLM_ARCH_BLOOM:
  623. {
  624. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  625. switch (hparams.n_layer) {
  626. case 24: type = LLM_TYPE_1B; break;
  627. case 30:
  628. switch (hparams.n_embd) {
  629. case 2560: type = LLM_TYPE_3B; break;
  630. case 4096: type = LLM_TYPE_7B; break;
  631. default: type = LLM_TYPE_UNKNOWN;
  632. } break;
  633. default: type = LLM_TYPE_UNKNOWN;
  634. }
  635. // TODO: become GGUF KV parameter
  636. hparams.f_max_alibi_bias = 8.0f;
  637. } break;
  638. case LLM_ARCH_MPT:
  639. {
  640. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  641. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  642. ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
  643. switch (hparams.n_layer) {
  644. case 32: type = LLM_TYPE_7B; break;
  645. case 48: type = LLM_TYPE_30B; break;
  646. default: type = LLM_TYPE_UNKNOWN;
  647. }
  648. } break;
  649. case LLM_ARCH_STABLELM:
  650. {
  651. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  652. switch (hparams.n_layer) {
  653. case 24: type = LLM_TYPE_1B; break;
  654. case 32: type = LLM_TYPE_3B; break;
  655. case 40: type = LLM_TYPE_12B; break;
  656. default: type = LLM_TYPE_UNKNOWN;
  657. }
  658. } break;
  659. case LLM_ARCH_QWEN:
  660. {
  661. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  662. switch (hparams.n_layer) {
  663. case 32: type = LLM_TYPE_7B; break;
  664. case 40: type = LLM_TYPE_13B; break;
  665. default: type = LLM_TYPE_UNKNOWN;
  666. }
  667. } break;
  668. case LLM_ARCH_QWEN2VL:
  669. {
  670. ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
  671. }
  672. // fall through
  673. case LLM_ARCH_QWEN2:
  674. {
  675. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  676. switch (hparams.n_layer) {
  677. case 24: type = hparams.n_embd == 1024 ? LLM_TYPE_0_5B : LLM_TYPE_1B; break;
  678. case 28: type = hparams.n_embd == 1536 ? LLM_TYPE_1_5B : LLM_TYPE_7B; break;
  679. case 32: type = LLM_TYPE_7B; break;
  680. case 36: type = LLM_TYPE_3B; break;
  681. case 40: type = hparams.n_head() == 20 ? LLM_TYPE_4B : LLM_TYPE_13B; break;
  682. case 48: type = LLM_TYPE_14B; break;
  683. case 64: type = LLM_TYPE_32B; break;
  684. case 80: type = LLM_TYPE_70B; break;
  685. default: type = LLM_TYPE_UNKNOWN;
  686. }
  687. } break;
  688. case LLM_ARCH_QWEN2MOE:
  689. {
  690. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
  691. ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
  692. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  693. switch (hparams.n_layer) {
  694. case 24: type = LLM_TYPE_A2_7B; break;
  695. case 28: type = LLM_TYPE_57B_A14B; break;
  696. default: type = LLM_TYPE_UNKNOWN;
  697. }
  698. } break;
  699. case LLM_ARCH_QWEN3:
  700. {
  701. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  702. switch (hparams.n_layer) {
  703. default: type = LLM_TYPE_UNKNOWN;
  704. }
  705. } break;
  706. case LLM_ARCH_QWEN3MOE:
  707. {
  708. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
  709. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  710. switch (hparams.n_layer) {
  711. default: type = LLM_TYPE_UNKNOWN;
  712. }
  713. } break;
  714. case LLM_ARCH_PHI2:
  715. {
  716. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  717. switch (hparams.n_layer) {
  718. case 24: type = LLM_TYPE_1B; break;
  719. case 32: type = LLM_TYPE_3B; break;
  720. default: type = LLM_TYPE_UNKNOWN;
  721. }
  722. } break;
  723. case LLM_ARCH_PHI3:
  724. {
  725. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  726. switch (hparams.n_layer) {
  727. case 24: type = LLM_TYPE_1B; break;
  728. case 32: type = LLM_TYPE_3B; break;
  729. case 40: type = LLM_TYPE_14B; break;
  730. default: type = LLM_TYPE_UNKNOWN;
  731. }
  732. // for backward compatibility; see: https://github.com/ggerganov/llama.cpp/pull/8931
  733. if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) {
  734. // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct
  735. hparams.n_swa = 2047;
  736. } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) {
  737. // default value for Phi-3-mini-128k-instruct
  738. // note: this seems incorrect because the window is bigger than the train context?
  739. hparams.n_swa = 262144;
  740. } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) {
  741. // default value for Phi-3-medium-128k-instruct
  742. // note: this seems incorrect because the window is equal to the train context?
  743. hparams.n_swa = 131072;
  744. }
  745. bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
  746. if (!found_swa && hparams.n_swa == 0) {
  747. throw std::runtime_error("invalid value for sliding_window");
  748. }
  749. } break;
  750. case LLM_ARCH_PHIMOE:
  751. {
  752. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  753. switch (hparams.n_layer) {
  754. case 32: type = LLM_TYPE_16x3_8B; break;
  755. default: type = LLM_TYPE_UNKNOWN;
  756. }
  757. } break;
  758. case LLM_ARCH_PLAMO:
  759. {
  760. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  761. switch (hparams.n_layer) {
  762. case 40: type = LLM_TYPE_13B; break;
  763. default: type = LLM_TYPE_UNKNOWN;
  764. }
  765. } break;
  766. case LLM_ARCH_GPT2:
  767. {
  768. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  769. switch (hparams.n_layer) {
  770. case 12: type = LLM_TYPE_SMALL; break;
  771. case 24: type = LLM_TYPE_MEDIUM; break;
  772. case 36: type = LLM_TYPE_LARGE; break;
  773. case 48: type = LLM_TYPE_XL; break;
  774. default: type = LLM_TYPE_UNKNOWN;
  775. }
  776. } break;
  777. case LLM_ARCH_CODESHELL:
  778. {
  779. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  780. switch (hparams.n_layer) {
  781. case 42: type = LLM_TYPE_7B; break;
  782. default: type = LLM_TYPE_UNKNOWN;
  783. }
  784. } break;
  785. case LLM_ARCH_ORION:
  786. {
  787. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  788. switch (hparams.n_layer) {
  789. case 40: type = LLM_TYPE_14B; break;
  790. default: type = LLM_TYPE_UNKNOWN;
  791. }
  792. } break;
  793. case LLM_ARCH_INTERNLM2:
  794. {
  795. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  796. switch (hparams.n_layer) {
  797. case 32: type = LLM_TYPE_7B; break;
  798. case 48: type = LLM_TYPE_20B; break;
  799. default: type = LLM_TYPE_UNKNOWN;
  800. }
  801. } break;
  802. case LLM_ARCH_GEMMA:
  803. {
  804. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  805. switch (hparams.n_layer) {
  806. case 18: type = LLM_TYPE_2B; break;
  807. case 28: type = LLM_TYPE_7B; break;
  808. default: type = LLM_TYPE_UNKNOWN;
  809. }
  810. } break;
  811. case LLM_ARCH_GEMMA2:
  812. {
  813. hparams.n_swa = 4096; // default value of gemma 2
  814. hparams.n_swa_pattern = 2;
  815. hparams.attn_soft_cap = true;
  816. ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
  817. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  818. ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false);
  819. ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
  820. switch (hparams.n_layer) {
  821. case 26: type = LLM_TYPE_2B; break;
  822. case 42: type = LLM_TYPE_9B; break;
  823. case 46: type = LLM_TYPE_27B; break;
  824. default: type = LLM_TYPE_UNKNOWN;
  825. }
  826. } break;
  827. case LLM_ARCH_GEMMA3:
  828. {
  829. hparams.n_swa_pattern = 6;
  830. hparams.rope_freq_base_train_swa = 10000.0f;
  831. hparams.rope_freq_scale_train_swa = 1.0f;
  832. ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
  833. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  834. switch (hparams.n_layer) {
  835. case 26: type = LLM_TYPE_1B; break;
  836. case 34: type = LLM_TYPE_4B; break;
  837. case 48: type = LLM_TYPE_12B; break;
  838. case 62: type = LLM_TYPE_27B; break;
  839. default: type = LLM_TYPE_UNKNOWN;
  840. }
  841. hparams.f_attention_scale = type == LLM_TYPE_27B
  842. ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
  843. : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
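// [editor's note: illustrative, assuming the 27B variant ships n_embd = 5376 with 32 heads]
// the 27B branch would then scale queries by 1.0f / sqrt(5376 / 32) = 1.0f / sqrt(168) ~= 0.077,
// while the smaller variants use the usual 1.0f / sqrt(n_embd_head_k)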
  844. } break;
  845. case LLM_ARCH_STARCODER2:
  846. {
  847. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  848. switch (hparams.n_layer) {
  849. case 30: type = LLM_TYPE_3B; break;
  850. case 32: type = LLM_TYPE_7B; break;
  851. case 40: type = LLM_TYPE_15B; break;
  852. case 52: type = LLM_TYPE_20B; break; // granite
  853. case 88: type = LLM_TYPE_34B; break; // granite
  854. default: type = LLM_TYPE_UNKNOWN;
  855. }
  856. } break;
  857. case LLM_ARCH_MAMBA:
  858. {
  859. ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
  860. ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
  861. ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
  862. ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
  863. ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false);
  864. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  865. switch (hparams.n_layer) {
  866. case 24:
  867. switch (hparams.n_embd) {
  868. case 768: type = LLM_TYPE_SMALL; break;
  869. default: type = LLM_TYPE_UNKNOWN;
  870. } break;
  871. case 48:
  872. switch (hparams.n_embd) {
  873. case 1024: type = LLM_TYPE_MEDIUM; break;
  874. case 1536: type = LLM_TYPE_LARGE; break;
  875. case 2048: type = LLM_TYPE_XL; break;
  876. default: type = LLM_TYPE_UNKNOWN;
  877. } break;
  878. case 64:
  879. switch (hparams.n_embd) {
  880. case 2560: type = LLM_TYPE_3B; break;
  881. default: type = LLM_TYPE_UNKNOWN;
  882. } break;
  883. default: type = LLM_TYPE_UNKNOWN;
  884. }
  885. } break;
  886. case LLM_ARCH_XVERSE:
  887. {
  888. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  889. switch (hparams.n_layer) {
  890. case 32: type = LLM_TYPE_7B; break;
  891. case 40: type = LLM_TYPE_13B; break;
  892. case 80: type = LLM_TYPE_65B; break;
  893. default: type = LLM_TYPE_UNKNOWN;
  894. }
  895. } break;
  896. case LLM_ARCH_COMMAND_R:
  897. {
  898. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  899. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  900. switch (hparams.n_layer) {
  901. case 40: type = LLM_TYPE_35B; break;
  902. default: type = LLM_TYPE_UNKNOWN;
  903. }
  904. } break;
  905. case LLM_ARCH_COHERE2:
  906. {
  907. hparams.n_swa_pattern = 4;
  908. ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
  909. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  910. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  911. switch (hparams.n_layer) {
  912. case 32: type = LLM_TYPE_8B; break;
  913. default: type = LLM_TYPE_UNKNOWN;
  914. }
  915. } break;
  916. case LLM_ARCH_DBRX:
  917. {
  918. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  919. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
  920. switch (hparams.n_layer) {
  921. case 40: type = LLM_TYPE_16x12B; break;
  922. default: type = LLM_TYPE_UNKNOWN;
  923. }
  924. } break;
  925. case LLM_ARCH_OLMO:
  926. {
  927. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  928. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  929. switch (hparams.n_layer) {
  930. case 22: type = LLM_TYPE_1B; break;
  931. case 32: type = LLM_TYPE_7B; break;
  932. case 80: type = LLM_TYPE_70B; break;
  933. default: type = LLM_TYPE_UNKNOWN;
  934. }
  935. } break;
  936. case LLM_ARCH_OLMO2:
  937. {
  938. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  939. switch (hparams.n_layer) {
  940. case 16: type = LLM_TYPE_1B; break;
  941. case 32: type = LLM_TYPE_7B; break;
  942. case 40: type = LLM_TYPE_13B; break;
  943. case 64: type = LLM_TYPE_32B; break;
  944. default: type = LLM_TYPE_UNKNOWN;
  945. }
  946. } break;
  947. case LLM_ARCH_OLMOE:
  948. {
  949. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  950. switch (hparams.n_layer) {
  951. case 16: type = LLM_TYPE_A1_7B; break;
  952. default: type = LLM_TYPE_UNKNOWN;
  953. }
  954. } break;
  955. case LLM_ARCH_OPENELM:
  956. {
  957. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  958. switch (hparams.n_layer) {
  959. case 16: type = LLM_TYPE_270M; break;
  960. case 20: type = LLM_TYPE_450M; break;
  961. case 28: type = LLM_TYPE_1B; break;
  962. case 36: type = LLM_TYPE_3B; break;
  963. default: type = LLM_TYPE_UNKNOWN;
  964. }
  965. } break;
  966. case LLM_ARCH_GPTNEOX:
  967. {
  968. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  969. ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
  970. switch (hparams.n_layer) {
  971. case 6:
  972. switch (hparams.n_ff()) {
  973. case 512: type = LLM_TYPE_14M; break;
  974. case 2048: type = LLM_TYPE_70M; break;
  975. default: type = LLM_TYPE_UNKNOWN;
  976. } break;
  977. case 12:
  978. switch (hparams.n_ff()) {
  979. case 3072: type = LLM_TYPE_160M; break;
  980. default: type = LLM_TYPE_UNKNOWN;
  981. } break;
  982. case 16:
  983. switch (hparams.n_ff()) {
  984. case 8192: type = LLM_TYPE_1B; break;
  985. default: type = LLM_TYPE_UNKNOWN;
  986. } break;
  987. case 24:
  988. switch (hparams.n_ff()) {
  989. case 4096: type = LLM_TYPE_410M; break;
  990. case 8192: type = LLM_TYPE_1_4B; break;
  991. default: type = LLM_TYPE_UNKNOWN;
  992. } break;
  993. case 32:
  994. switch (hparams.n_ff()) {
  995. case 10240: type = LLM_TYPE_2_8B; break;
  996. case 16384: type = LLM_TYPE_6_9B; break;
  997. default: type = LLM_TYPE_UNKNOWN;
  998. } break;
  999. case 36:
  1000. switch (hparams.n_ff()) {
  1001. case 20480: type = LLM_TYPE_12B; break;
  1002. default: type = LLM_TYPE_UNKNOWN;
  1003. } break;
  1004. case 44:
  1005. switch (hparams.n_ff()) {
  1006. case 24576: type = LLM_TYPE_20B; break;
  1007. default: type = LLM_TYPE_UNKNOWN;
  1008. } break;
  1009. default: type = LLM_TYPE_UNKNOWN;
  1010. }
  1011. } break;
  1012. case LLM_ARCH_ARCTIC:
  1013. {
  1014. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1015. if (hparams.n_expert == 128) {
  1016. switch (hparams.n_layer) {
  1017. case 35: type = LLM_TYPE_10B_128x3_66B; break;
  1018. default: type = LLM_TYPE_UNKNOWN;
  1019. }
  1020. } else {
  1021. type = LLM_TYPE_UNKNOWN;
  1022. }
  1023. } break;
  1024. case LLM_ARCH_DEEPSEEK:
  1025. {
  1026. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1027. ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
  1028. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  1029. ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
  1030. ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
  1031. switch (hparams.n_layer) {
  1032. case 28: type = LLM_TYPE_20B; break;
  1033. default: type = LLM_TYPE_UNKNOWN;
  1034. }
  1035. } break;
  1036. case LLM_ARCH_DEEPSEEK2:
  1037. {
  1038. bool is_lite = (hparams.n_layer == 27);
  1039. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1040. ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
  1041. if (!is_lite) {
  1042. ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
  1043. }
  1044. ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
  1045. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  1046. ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
  1047. ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
  1048. ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
  1049. ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false);
  1050. if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
  1051. // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
  1052. // that have no expert_gating_func model parameter set
  1053. hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
  1054. }
  1055. ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
  1056. switch (hparams.n_layer) {
  1057. case 27: type = LLM_TYPE_16B; break;
  1058. case 60: type = LLM_TYPE_236B; break;
  1059. case 61: type = LLM_TYPE_671B; break;
  1060. default: type = LLM_TYPE_UNKNOWN;
  1061. }
  1062. } break;
  1063. case LLM_ARCH_PLM:
  1064. {
  1065. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1066. ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
  1067. switch (hparams.n_layer) {
  1068. case 32: type = LLM_TYPE_1_8B; break;
  1069. default: type = LLM_TYPE_UNKNOWN;
  1070. }
  1071. } break;
  1072. case LLM_ARCH_CHATGLM:
  1073. {
  1074. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1075. switch (hparams.n_layer) {
  1076. case 28: {
  1077. if (hparams.n_head(0) == 16) {
  1078. type = LLM_TYPE_1_5B;
  1079. } else {
  1080. type = LLM_TYPE_6B;
  1081. }
  1082. } break;
  1083. case 40: {
  1084. if (hparams.n_head(0) == 24) {
  1085. type = LLM_TYPE_4B;
  1086. } else {
  1087. type = LLM_TYPE_9B;
  1088. }
  1089. } break;
  1090. default: type = LLM_TYPE_UNKNOWN;
  1091. }
  1092. } break;
  1093. case LLM_ARCH_GLM4:
  1094. {
  1095. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1096. switch (hparams.n_layer) {
  1097. case 40: type = LLM_TYPE_9B; break;
  1098. case 61: type = LLM_TYPE_32B; break;
  1099. default: type = LLM_TYPE_UNKNOWN;
  1100. }
  1101. } break;
  1102. case LLM_ARCH_BITNET:
  1103. {
  1104. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1105. switch (hparams.n_layer) {
  1106. case 26: type = LLM_TYPE_3B; break;
  1107. default: type = LLM_TYPE_UNKNOWN;
  1108. }
  1109. } break;
  1110. case LLM_ARCH_T5:
  1111. {
  1112. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1113. ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
  1114. uint32_t dec_start_token_id;
  1115. if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) {
  1116. hparams.dec_start_token_id = dec_start_token_id;
  1117. }
  1118. switch (hparams.n_layer) {
  1119. case 6: type = LLM_TYPE_60M; break; // t5-small
  1120. case 8: type = LLM_TYPE_80M; break; // flan-t5-small
  1121. case 12:
  1122. switch (hparams.n_ff()) {
  1123. case 3072: type = LLM_TYPE_220M; break; // t5-base
  1124. case 2048: type = LLM_TYPE_250M; break; // flan-t5-base
  1125. default: type = LLM_TYPE_UNKNOWN;
  1126. } break;
  1127. case 24:
  1128. switch (hparams.n_ff()) {
  1129. case 4096: type = LLM_TYPE_770M; break; // t5-large
  1130. case 2816: type = LLM_TYPE_780M; break; // flan-t5-large
  1131. case 16384: type = LLM_TYPE_3B; break; // t5-3b
  1132. case 5120: type = LLM_TYPE_3B; break; // flan-t5-xl
  1133. case 65536: type = LLM_TYPE_11B; break; // t5-11b
  1134. case 10240: type = LLM_TYPE_11B; break; // flan-t5-xxl
  1135. default: type = LLM_TYPE_UNKNOWN;
  1136. } break;
  1137. default: type = LLM_TYPE_UNKNOWN;
  1138. }
  1139. } break;
  1140. case LLM_ARCH_T5ENCODER:
  1141. {
  1142. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1143. ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
  1144. type = LLM_TYPE_UNKNOWN;
  1145. } break;
  1146. case LLM_ARCH_JAIS:
  1147. {
  1148. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  1149. ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
  1150. switch (hparams.n_layer) {
  1151. case 24: type = LLM_TYPE_1_3B; break;
  1152. case 40: type = LLM_TYPE_13B; break;
  1153. /* TODO: add variants */
  1154. default: type = LLM_TYPE_UNKNOWN;
  1155. }
  1156. } break;
  1157. case LLM_ARCH_NEMOTRON:
  1158. {
  1159. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  1160. switch (hparams.n_layer) {
  1161. case 32: type = LLM_TYPE_4B; break;
  1162. default: type = LLM_TYPE_UNKNOWN;
  1163. }
  1164. } break;
  1165. case LLM_ARCH_EXAONE:
  1166. {
  1167. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1168. switch (hparams.n_layer) {
  1169. case 32: type = LLM_TYPE_8B; break;
  1170. default: type = LLM_TYPE_UNKNOWN;
  1171. }
  1172. } break;
  1173. case LLM_ARCH_RWKV6:
  1174. case LLM_ARCH_RWKV6QWEN2:
  1175. {
  1176. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps, false);
  1177. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false);
  1178. ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);
  1179. ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
  1180. ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
  1181. ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false);
  1182. ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false);
  1183. switch (hparams.n_layer) {
  1184. case 24: type = LLM_TYPE_1_6B; break;
  1185. case 32:
  1186. switch (hparams.n_embd) {
  1187. case 2560: type = LLM_TYPE_3B; break;
  1188. case 4096: type = LLM_TYPE_7B; break;
  1189. default: type = LLM_TYPE_UNKNOWN;
  1190. } break;
  1191. case 61: type = LLM_TYPE_14B; break;
  1192. case 64: type = LLM_TYPE_32B; break;
  1193. default: type = LLM_TYPE_UNKNOWN;
  1194. }
  1195. } break;
  1196. case LLM_ARCH_RWKV7:
  1197. case LLM_ARCH_ARWKV7:
  1198. {
  1199. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps, false);
  1200. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false);
  1201. ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);
  1202. ml.get_key(LLM_KV_ATTENTION_DECAY_LORA_RANK, hparams.n_lora_decay);
  1203. ml.get_key(LLM_KV_ATTENTION_ICLR_LORA_RANK, hparams.n_lora_iclr);
  1204. ml.get_key(LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, hparams.n_lora_value_res_mix);
  1205. ml.get_key(LLM_KV_ATTENTION_GATE_LORA_RANK, hparams.n_lora_gate, false);
  1206. ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false);
  1207. switch (hparams.n_layer) {
  1208. case 12: type = LLM_TYPE_190M; break;
  1209. case 24:
  1210. switch (hparams.n_embd) {
  1211. case 1024: type = LLM_TYPE_450M; break;
  1212. case 2048: type = LLM_TYPE_1_5B; break;
  1213. default: type = LLM_TYPE_UNKNOWN;
  1214. } break;
  1215. case 28:
  1216. switch (hparams.n_embd) {
  1217. case 1536: type = LLM_TYPE_1_5B; break;
  1218. case 3584: type = LLM_TYPE_7B; break;
  1219. default: type = LLM_TYPE_UNKNOWN;
  1220. } break;
  1221. case 32: type = LLM_TYPE_2_9B; break; // RWKV-7-World
  1222. default: type = LLM_TYPE_UNKNOWN;
  1223. }
  1224. } break;
  1225. case LLM_ARCH_GRANITE:
  1226. case LLM_ARCH_GRANITE_MOE:
  1227. {
  1228. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1229. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  1230. ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
  1231. ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
  1232. ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);
  1233. switch (hparams.n_layer) {
  1234. case 32: type = LLM_TYPE_3B; break;
  1235. case 40: type = LLM_TYPE_3B; break;
  1236. // Add additional layer/vocab/etc checks here for other model sizes
  1237. default: type = LLM_TYPE_UNKNOWN;
  1238. }
  1239. } break;
  1240. case LLM_ARCH_CHAMELEON:
  1241. {
  1242. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1243. hparams.f_norm_eps = 1e-5; // eps for qk-norm, torch default
  1244. ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm);
  1245. switch (hparams.n_layer) {
  1246. case 32: type = LLM_TYPE_7B; break;
  1247. case 48: type = LLM_TYPE_34B; break;
  1248. default: type = LLM_TYPE_UNKNOWN;
  1249. }
  1250. } break;
  1251. case LLM_ARCH_WAVTOKENIZER_DEC:
  1252. {
  1253. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  1254. ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS, hparams.f_norm_group_eps);
  1255. ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups);
  1256. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  1257. } break;
  1258. case LLM_ARCH_BAILINGMOE:
  1259. {
  1260. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  1261. ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
  1262. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  1263. ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
  1264. ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
  1265. ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
  1266. switch (hparams.n_layer) {
  1267. case 28: type = LLM_TYPE_16B; break;
  1268. case 88: type = LLM_TYPE_290B; break;
  1269. default: type = LLM_TYPE_UNKNOWN;
  1270. }
  1271. } break;
  1272. default: throw std::runtime_error("unsupported model architecture");
  1273. }
  1274. pimpl->n_bytes = ml.n_bytes;
  1275. pimpl->desc_str = arch_name() + " " + type_name() + " " + ml.ftype_name();
  1276. if (hparams.f_max_alibi_bias > 0.0f) {
  1277. hparams.use_alibi = true;
  1278. }
  1279. hparams.rope_type = llama_model_rope_type(this);
  1280. }
  1281. void llama_model::load_vocab(llama_model_loader & ml) {
  1282. const auto kv = LLM_KV(arch);
  1283. vocab.load(ml, kv);
  1284. }
  1285. bool llama_model::load_tensors(llama_model_loader & ml) {
  1286. const auto & split_mode = params.split_mode;
  1287. const auto & n_gpu_layers = params.n_gpu_layers;
  1288. const auto & use_mlock = params.use_mlock;
  1289. const auto & tensor_split = params.tensor_split;
  1290. const int n_layer = hparams.n_layer;
  1291. const bool use_mmap_buffer = true;
  1292. LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");
  1293. // build a list of buffer types for the CPU and GPU devices
  1294. pimpl->cpu_buft_list = make_cpu_buft_list(devices);
  1295. for (auto * dev : devices) {
  1296. buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split);
  1297. // add CPU buffer types as a fallback
  1298. buft_list.insert(buft_list.end(), pimpl->cpu_buft_list.begin(), pimpl->cpu_buft_list.end());
  1299. pimpl->gpu_buft_list.emplace(dev, std::move(buft_list));
  1300. }
  1301. // calculate the split points
  1302. bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + n_devices(), [](float x) { return x == 0.0f; });
  1303. std::vector<float> splits(n_devices());
  1304. if (all_zero) {
  1305. // default split, by free memory
  1306. for (size_t i = 0; i < n_devices(); ++i) {
  1307. ggml_backend_dev_t dev = devices[i];
  1308. size_t total;
  1309. size_t free;
  1310. ggml_backend_dev_memory(dev, &free, &total);
  1311. splits[i] = free;
  1312. }
  1313. } else {
  1314. std::copy(tensor_split, tensor_split + n_devices(), splits.begin());
  1315. }
  1316. // sum and normalize the splits to get the split points
  1317. float split_sum = 0.0f;
  1318. for (size_t i = 0; i < n_devices(); ++i) {
  1319. split_sum += splits[i];
  1320. splits[i] = split_sum;
  1321. }
  1322. for (size_t i = 0; i < n_devices(); ++i) {
  1323. splits[i] /= split_sum;
  1324. }
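// [editor's note: illustrative] with three devices reporting 8, 8 and 16 GiB free, the two
// loops above turn splits = {8, 8, 16} into cumulative sums {8, 16, 32} and then into the
// normalized split points {0.25f, 0.5f, 1.0f}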
  1325. ggml_backend_dev_t cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
  1326. const int i_gpu_start = std::max((int) hparams.n_layer - n_gpu_layers, (int) 0);
  1327. const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, (int)n_layer + 1);
  1328. auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev {
  1329. const bool is_swa = il < (int) hparams.n_layer && hparams.is_swa(il);
  1330. if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) {
  1331. LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(cpu_dev), is_swa);
  1332. return {cpu_dev, &pimpl->cpu_buft_list};
  1333. }
  1334. const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin();
  1335. auto * dev = devices.at(layer_gpu);
  1336. LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(dev), is_swa);
  1337. return {dev, &pimpl->gpu_buft_list.at(dev)};
  1338. };
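// [editor's note: illustrative, continuing the example above] with split points
// {0.25f, 0.5f, 1.0f} and act_gpu_layers = 32, std::upper_bound assigns the first 8 offloaded
// layers to device 0, the next 8 to device 1 and the remaining 16 to device 2; layers below
// i_gpu_start stay on the CPU together with the input layer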
  1339. // assign the input layer
  1340. // there is very little benefit to offloading the input layer, so always keep it on the CPU
  1341. pimpl->dev_input = { cpu_dev, &pimpl->cpu_buft_list };
  1342. // assign the repeating layers to the devices according to the splits
  1343. pimpl->dev_layer.resize(n_layer);
  1344. for (int il = 0; il < n_layer; ++il) {
  1345. pimpl->dev_layer[il] = get_layer_buft_list(il);
  1346. }
  1347. // assign the output layer
  1348. pimpl->dev_output = get_layer_buft_list(n_layer);
  1349. // one ggml context per buffer type
  1350. int max_n_tensors = ml.n_tensors;
  1351. max_n_tensors += 1; // duplicated output tensor
  1352. max_n_tensors += n_layer*2; // duplicated rope freq tensors
  1353. const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors;
  1354. std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
  1355. auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
  1356. auto it = ctx_map.find(buft);
  1357. if (it == ctx_map.end()) {
  1358. ggml_init_params params = {
  1359. /*.mem_size =*/ ctx_size,
  1360. /*.mem_buffer =*/ NULL,
  1361. /*.no_alloc =*/ true,
  1362. };
  1363. ggml_context * ctx = ggml_init(params);
  1364. if (!ctx) {
  1365. throw std::runtime_error(format("failed to create ggml context"));
  1366. }
  1367. ctx_map[buft] = ctx;
  1368. pimpl->ctxs.emplace_back(ctx);
  1369. return ctx;
  1370. }
  1371. return it->second;
  1372. };
    const auto TENSOR_DUPLICATED   = llama_model_loader::TENSOR_DUPLICATED;
    const auto TENSOR_NOT_REQUIRED = llama_model_loader::TENSOR_NOT_REQUIRED;

    // create tensors for the weights
    {
        // note: cast to int64_t since we will use these for the tensor dimensions
        const int64_t n_head        = hparams.n_head();
        const int64_t n_head_kv     = hparams.n_head_kv();
        const int64_t n_embd        = hparams.n_embd;
        const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa();
        const int64_t n_embd_v_gqa  = hparams.n_embd_v_gqa();
        const int64_t n_embd_head_k = hparams.n_embd_head_k;
        const int64_t n_embd_head_v = hparams.n_embd_head_v;
        const int64_t n_ff          = hparams.n_ff();
        const int64_t n_embd_gqa    = n_embd_v_gqa;
        const int64_t n_vocab       = vocab.n_tokens();
        const int64_t n_token_types = vocab.n_token_types();
        const int64_t n_rot         = hparams.n_rot;
        const int64_t n_expert      = hparams.n_expert;
        const int64_t n_expert_used = hparams.n_expert_used;
        const int64_t n_ctx_train   = hparams.n_ctx_train;

        if (n_expert > 0 && hparams.n_expert_used == 0) {
            throw std::runtime_error("model has expert layers but no expert layers are used");
        }
        int n_moved_tensors = 0;
        ggml_tensor * first_moved_tensor = nullptr;
        ggml_backend_buffer_type_t first_moved_from_buft = nullptr;
        ggml_backend_buffer_type_t first_moved_to_buft   = nullptr;
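        // create_tensor resolves a tensor name to its metadata in the GGUF file, picks a
        // buffer type for it (honoring user-provided buffer-type overrides, then falling
        // back to the per-layer buffer-type priority list), and creates the tensor in the
        // ggml context that matches the chosen buffer type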
        auto create_tensor = [&](const LLM_TN_IMPL & tn, const std::initializer_list<int64_t> & ne, int flags) -> ggml_tensor * {
            ggml_tensor * t_meta = ml.get_tensor_meta(tn.str().c_str());

            if (!t_meta) {
                if (flags & TENSOR_NOT_REQUIRED) {
                    return nullptr;
                }
                throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str()));
            }

            // some models use the token embedding tensor as the output, but since these are used in different layers and with different ops
            // the tensor is duplicated
            // to handle this, we check if the tensor is duplicated, and if so, we assume that it is being loaded as the output tensor
            llm_tensor tn_tensor = tn.tensor;
            if (tn.tensor == LLM_TENSOR_TOKEN_EMBD && flags & TENSOR_DUPLICATED) {
                tn_tensor = LLM_TENSOR_OUTPUT;
            }

            llm_tensor_info info;
            try {
                info = llm_tensor_info_for(tn_tensor);
            } catch (const std::out_of_range & e) {
                throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str()));
            }

            // skip unused tensors
            if (info.op == GGML_OP_NONE) {
                const size_t nbytes = ggml_nbytes(t_meta);
                LLAMA_LOG_WARN("model has unused tensor %s (size = %zu bytes) -- ignoring\n", tn.str().c_str(), nbytes);

                ml.size_data -= nbytes;
                ml.n_created++;

                return nullptr;
            }

            // tensors with "bias" suffix are always used with GGML_OP_ADD
            ggml_op op;
            bool bias = tn.suffix != nullptr && strcmp(tn.suffix, "bias") == 0;
            if (bias) {
                op = GGML_OP_ADD;
            } else {
                op = info.op;
            }

            // sanity checks
            if (info.layer == LLM_TENSOR_LAYER_INPUT || info.layer == LLM_TENSOR_LAYER_OUTPUT) {
                if (tn.bid != -1) {
                    GGML_ABORT("input/output layer tensor %s used with a layer number", tn.str().c_str());
                }
            } else {
                if (tn.bid == -1) {
                    GGML_ABORT("repeating layer tensor %s used without a layer number", tn.str().c_str());
                }
            }

            // select the buffer type for this tensor
            buft_list_t * buft_list;
            switch (info.layer) {
                case LLM_TENSOR_LAYER_INPUT:
                    buft_list = pimpl->dev_input.buft_list;
                    break;
                case LLM_TENSOR_LAYER_OUTPUT:
                    buft_list = pimpl->dev_output.buft_list;
                    break;
                case LLM_TENSOR_LAYER_REPEATING:
                    buft_list = pimpl->dev_layer.at(tn.bid).buft_list;
                    break;
                default:
                    GGML_ABORT("invalid layer %d for tensor %s", info.layer, tn.str().c_str());
            }

            ggml_backend_buffer_type_t buft = nullptr;

            // check overrides
            if (ml.tensor_buft_overrides) {
                std::string tensor_name = tn.str();
                for (const auto * overrides = ml.tensor_buft_overrides; overrides->pattern != nullptr; ++overrides) {
                    std::regex pattern(overrides->pattern);
                    if (std::regex_search(tensor_name, pattern)) {
                        LLAMA_LOG_DEBUG("tensor %s buffer type overridden to %s\n", tensor_name.c_str(), ggml_backend_buft_name(overrides->buft));
                        buft = overrides->buft;
                        break;
                    }
                }
            }

            if (!buft) {
                buft = select_weight_buft(hparams, t_meta, op, *buft_list);
                if (!buft) {
                    throw std::runtime_error(format("failed to find a compatible buffer type for tensor %s", tn.str().c_str()));
                }
            }

            // avoid using a host buffer when using mmap
            auto * buft_dev = ggml_backend_buft_get_device(buft);
            if (ml.use_mmap && buft_dev && buft == ggml_backend_dev_host_buffer_type(buft_dev)) {
                auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
                buft = ggml_backend_dev_buffer_type(cpu_dev);
            }

            if (buft != buft_list->front().second) {
                n_moved_tensors++;
                if (!first_moved_tensor) {
                    first_moved_tensor    = t_meta;
                    first_moved_from_buft = buft_list->front().second;
                    first_moved_to_buft   = buft;
                }
            }

            ggml_context * ctx = ctx_for_buft(buft);

            // if duplicated, check if the original tensor was allocated in the same buffer type context and avoid creating a new one
            if (flags & TENSOR_DUPLICATED) {
                ggml_tensor * t = ggml_get_tensor(ctx, tn.str().c_str());
                if (t) {
                    return t;
                }
            }
            return ml.create_tensor(ctx, tn, ne, flags);
        };
        layers.resize(n_layer);

        // TODO: move to a separate function
        const auto tn = LLM_TN(arch);
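        // per-architecture tensor layout; tensors requested with TENSOR_NOT_REQUIRED
        // simply stay null when they are absent from the GGUF file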
        switch (arch) {
            case LLM_ARCH_LLAMA:
            case LLM_ARCH_REFACT:
            case LLM_ARCH_MINICPM:
            case LLM_ARCH_GRANITE:
            case LLM_ARCH_GRANITE_MOE:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                    // if output is NULL, init from the input tok embed
                    if (output == NULL) {
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);

                        // optional bias tensors
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
                            layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        }
                        else {
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        }
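                        // dense FFN when the model has no experts, otherwise MoE tensors
                        // (experts are stored batched in a single 3D tensor per projection)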
                        if (n_expert == 0) {
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
                            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);

                            // optional MLP bias
                            layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
                            layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
                        } else {
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED);
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
                            layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
                        }
                    }
                } break;
            case LLM_ARCH_LLAMA4:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                    // if output is NULL, init from the input tok embed
                    if (output == NULL) {
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                    }

                    GGML_ASSERT(hparams.n_moe_layer_step > 0 && "Llama 4 requires n_moe_layer_step > 0");
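                    // Llama 4 interleaves dense and MoE blocks: every n_moe_layer_step-th
                    // layer uses the routed expert tensors plus a single shared expert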
                    for (int i = 0; i < n_layer; ++i) {
                        bool is_moe_layer = (i + 1) % hparams.n_moe_layer_step == 0;

                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));

                        if (is_moe_layer) {
                            int n_ff_exp = hparams.n_ff_exp;

                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert}, 0);
                            layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);

                            // Shared expert
                            const int64_t n_ff_shexp = n_ff_exp;
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, n_ff_shexp}, 0);
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd }, 0);
                            layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, n_ff_shexp}, 0);
                        } else {
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
                            layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
                        }
                    }
                } break;
            case LLM_ARCH_DECI:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                    // if output is NULL, init from the input tok embed
                    if (output == NULL) {
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                    }
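                    // Deci models may vary the attention and FFN shapes from layer to layer,
                    // so head counts and FFN width are read from the per-layer hparams accessors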
                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];
                        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(i);
                        const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(i);
                        const int64_t n_embd_gqa = hparams.n_embd_v_gqa(i);
                        const int64_t n_ff = hparams.n_ff(i);
                        const int64_t n_head = hparams.n_head(i);
                        const int64_t n_head_kv = hparams.n_head_kv(i);

                        if (n_head_kv == 0 && n_head > 0) {
                            // linear attention for DeciLMCausalModel
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
                        }
                        else if (n_head_kv > 0) {
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
                        }

                        // optional bias tensors
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
                            layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        }
                        else {
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        }

                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);

                        // optional MLP bias
                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
                    }
                } break;
            case LLM_ARCH_MINICPM3:
                {
                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
                    const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;

                    const int64_t q_lora_rank  = hparams.n_lora_q;
                    const int64_t kv_lora_rank = hparams.n_lora_kv;
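                    // MiniCPM3 factors the Q and KV projections through low-rank (LoRA-style)
                    // bottlenecks, so the attention weights below are split into *_a / *_b pairs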
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                    // if output is NULL, init from the input tok embed
                    if (output == NULL) {
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
                        layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);

                        layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
                        layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0);

                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
                        layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_head * ( n_embd_head_v), n_embd}, 0);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);

                        layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                    }
                } break;
            case LLM_ARCH_GROK:
                {
                    if (n_expert == 0) {
                        throw std::runtime_error("Grok model cannot have zero experts");
                    }

                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);

                    // if output is NULL, init from the input tok embed
                    if (output == NULL) {
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);

                        layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED);
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
                        layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);

                        layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
                    }
                } break;
            case LLM_ARCH_DBRX:
                {
                    if (n_expert == 0) {
                        throw std::runtime_error("DBRX model cannot have zero experts");
                    }

                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);

                        layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);

                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
                        layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
                    }
                } break;
            case LLM_ARCH_BAICHUAN:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
                    {
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);

                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
                    }
                } break;
            case LLM_ARCH_FALCON:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                    // output
                    {
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);

                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
                        if (!output) {
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);

                        layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);

                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);

                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
                    }
                } break;
            case LLM_ARCH_STARCODER:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
                    pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0);
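                    // learned absolute position embeddings, sized to the training context length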
                    // output
                    {
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
                        if (!output) {
                            // needs to be on GPU
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];

                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);

                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);

                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);

                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);

                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);

                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
                    }
                } break;
            case LLM_ARCH_BERT:
            case LLM_ARCH_NOMIC_BERT:
                {
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
                    type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0);

                    if (arch == LLM_ARCH_BERT) {
                        pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0);

                        cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
                        cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {n_embd}, TENSOR_NOT_REQUIRED);

                        cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED);
                        cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {1}, TENSOR_NOT_REQUIRED);
                    }

                    tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);

                    for (int i = 0; i < n_layer; ++i) {
                        auto & layer = layers[i];
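                        // BERT checkpoints store separate Q/K/V projections with biases;
                        // NOMIC_BERT uses a fused QKV projection and a gated FFN instead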
                        if (arch == LLM_ARCH_BERT) {
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);

                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);

                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
                        } else {
                            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
                        }

                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);

                        layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
                        layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd}, 0);

                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);

                        if (arch == LLM_ARCH_BERT) {
                            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
                            layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
                        } else {
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
                        }

                        layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
                        layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0);
                    }
                } break;
  1848. case LLM_ARCH_JINA_BERT_V2:
  1849. {
  1850. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); // word_embeddings
  1851. type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0); // token_type_embeddings
  1852. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); // LayerNorm
  1853. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0); //LayerNorm bias
  1854. cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED);
  1855. cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {1}, TENSOR_NOT_REQUIRED);
  1856. for (int i = 0; i < n_layer; ++i) {
  1857. auto & layer = layers[i]; // JinaBertLayer
  1858. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  1859. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
  1860. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1861. layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1862. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  1863. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
  1864. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1865. layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1866. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  1867. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
  1868. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); //output_dens
  1869. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); //output_dens
  1870. layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); //output_norm
  1871. layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd}, 0);
  1872. layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1873. layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1874. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1875. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  1876. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  1877. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  1878. layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
  1879. layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0);
  1880. }
  1881. } break;
  1882. case LLM_ARCH_BLOOM:
  1883. {
  1884. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1885. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
  1886. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
  1887. // output
  1888. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1889. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  1890. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1891. // if output is NULL, init from the input tok embed
  1892. if (output == NULL) {
  1893. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  1894. }
  1895. for (int i = 0; i < n_layer; ++i) {
  1896. auto & layer = layers[i];
  1897. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1898. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  1899. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  1900. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
  1901. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1902. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  1903. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  1904. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  1905. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  1906. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  1907. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1908. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  1909. }
  1910. } break;
  1911. case LLM_ARCH_MPT:
  1912. {
  1913. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1914. pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, TENSOR_NOT_REQUIRED);
  1915. // output
  1916. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1917. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, TENSOR_NOT_REQUIRED);
  1918. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1919. if (!output) {
  1920. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU
  1921. }
  1922. for (int i = 0; i < n_layer; ++i) {
  1923. auto & layer = layers[i];
  1924. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1925. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1926. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  1927. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  1928. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1929. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1930. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  1931. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1932. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  1933. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1934. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1935. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
  1936. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1937. layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1938. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1939. layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1940. // AWQ ScaleActivation layer
  1941. layer.ffn_act = create_tensor(tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, TENSOR_NOT_REQUIRED);
  1942. }
  1943. } break;
  1944. case LLM_ARCH_STABLELM:
  1945. {
  1946. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1947. // output
  1948. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  1949. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1950. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  1951. for (int i = 0; i < n_layer; ++i) {
  1952. auto & layer = layers[i];
  1953. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1954. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  1955. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  1956. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  1957. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  1958. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1959. // optional bias tensors, present in Stable LM 2 1.6B
  1960. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1961. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  1962. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  1963. // optional q and k layernorms, present in StableLM 2 12B
  1964. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, TENSOR_NOT_REQUIRED);
  1965. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
  1966. // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
  1967. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1968. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  1969. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  1970. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  1971. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  1972. }
  1973. } break;
  1974. case LLM_ARCH_QWEN:
  1975. {
  1976. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1977. // output
  1978. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1979. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  1980. for (int i = 0; i < n_layer; ++i) {
  1981. auto & layer = layers[i];
  1982. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  1983. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3}, 0);
  1984. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3}, 0);
  1985. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  1986. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  1987. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2}, 0);
  1988. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd}, 0);
  1989. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2}, 0);
  1990. }
  1991. } break;
  1992. case LLM_ARCH_QWEN2:
  1993. case LLM_ARCH_QWEN2VL:
  1994. {
  1995. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  1996. // output
  1997. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  1998. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  1999. // if output is NULL, init from the input tok embed
  2000. if (output == NULL) {
  2001. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2002. }
  2003. for (int i = 0; i < n_layer; ++i) {
  2004. auto & layer = layers[i];
  2005. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2006. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2007. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2008. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2009. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2010. // optional bias tensors
  2011. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
  2012. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
  2013. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
  2014. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2015. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2016. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2017. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2018. }
  2019. } break;
  2020. case LLM_ARCH_QWEN2MOE:
  2021. {
  2022. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2023. // output
  2024. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2025. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2026. for (int i = 0; i < n_layer; ++i) {
  2027. auto & layer = layers[i];
  2028. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2029. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2030. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2031. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2032. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2033. // optional bias tensors
  2034. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2035. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2036. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2037. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2038. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  2039. if (n_expert == 0) {
  2040. throw std::runtime_error("n_expert must be > 0 for QWEN2MOE");
  2041. }
  2042. if (n_expert_used == 0) {
  2043. throw std::runtime_error("n_expert_used must be > 0 for QWEN2MOE");
  2044. }
  2045. // MoE branch
  2046. const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
  2047. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2048. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
  2049. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2050. // Shared expert branch
  2051. const int64_t n_ff_shexp = hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff;
  2052. layer.ffn_gate_inp_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd}, 0);
  2053. layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, n_ff_shexp}, 0);
  2054. layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0);
  2055. layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, n_ff_shexp}, 0);
  2056. }
  2057. } break;
  2058. case LLM_ARCH_QWEN3:
  2059. {
  2060. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2061. // output
  2062. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2063. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2064. // if output is NULL, init from the input tok embed
  2065. if (output == NULL) {
  2066. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2067. }
  2068. for (int i = 0; i < n_layer; ++i) {
  2069. auto & layer = layers[i];
  2070. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2071. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2072. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2073. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2074. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  2075. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
  2076. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
  2077. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2078. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2079. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2080. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2081. }
  2082. } break;
  2083. case LLM_ARCH_QWEN3MOE:
  2084. {
  2085. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2086. // output
  2087. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2088. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2089. for (int i = 0; i < n_layer; ++i) {
  2090. auto & layer = layers[i];
  2091. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2092. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2093. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2094. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2095. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  2096. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
  2097. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
  2098. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2099. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  2100. if (n_expert == 0) {
  2101. throw std::runtime_error("n_expert must be > 0 for QWEN3MOE");
  2102. }
  2103. if (n_expert_used == 0) {
  2104. throw std::runtime_error("n_expert_used must be > 0 for QWEN3MOE");
  2105. }
  2106. // MoE branch
  2107. const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
  2108. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2109. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
  2110. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2111. }
  2112. } break;
  2113. case LLM_ARCH_PHI2:
  2114. {
  2115. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2116. // output
  2117. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2118. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2119. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2120. output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, 0);
  2121. for (int i = 0; i < n_layer; ++i) {
  2122. auto & layer = layers[i];
  2123. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2124. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2125. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2126. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2127. if (layer.wqkv == nullptr) {
  2128. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2129. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
  2130. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2131. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
  2132. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2133. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
  2134. }
  2135. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2136. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  2137. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2138. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  2139. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2140. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  2141. }
  2142. } break;
  2143. case LLM_ARCH_PHI3:
  2144. {
  2145. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
  2146. // output
  2147. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
  2148. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2149. // if output is NULL, init from the input tok embed
  2150. if (output == NULL) {
  2151. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2152. }
  2153. for (int i = 0; i < n_layer; ++i) {
  2154. auto & layer = layers[i];
  2155. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
  2156. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED);
  2157. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
  2158. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
  2159. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
  2160. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }, 0);
  2161. layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
  2162. layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
  2163. }
  2164. } break;
  2165. case LLM_ARCH_PHIMOE:
  2166. {
  2167. const int64_t n_embd_head = n_embd / n_head;
  2168. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
  2169. // output
  2170. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
  2171. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2172. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, 0);
  2173. output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), { n_vocab }, 0);
  2174. for (int i = 0; i < n_layer; ++i) {
  2175. auto & layer = layers[i];
  2176. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
  2177. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), { n_embd }, 0);
  2178. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED);
  2179. if (layer.wqkv == nullptr) {
  2180. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2181. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
  2182. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2183. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
  2184. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2185. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
  2186. }
  2187. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
  2188. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, 0);
  2189. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
  2190. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), { n_embd }, 0);
  2191. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  2192. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  2193. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
  2194. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  2195. layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
  2196. layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
  2197. }
  2198. } break;
  2199. case LLM_ARCH_PLAMO:
  2200. {
  2201. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2202. // output
  2203. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2204. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2205. for (int i = 0; i < n_layer; ++i) {
  2206. auto & layer = layers[i];
  2207. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2208. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2209. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2210. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2211. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2212. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2213. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2214. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2215. }
  2216. } break;
  2217. case LLM_ARCH_GPT2:
  2218. {
  2219. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2220. pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0);
  2221. // output
  2222. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2223. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2224. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2225. // if output is NULL, init from the input tok embed
  2226. if (output == NULL) {
  2227. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2228. }
  2229. for (int i = 0; i < n_layer; ++i) {
  2230. auto & layer = layers[i];
  2231. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2232. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2233. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  2234. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
  2235. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2236. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  2237. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2238. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  2239. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2240. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  2241. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2242. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  2243. }
  2244. } break;
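// Note on the GPT-2 case above: a recurring pattern in this switch is to load the
// LM head (LLM_TENSOR_OUTPUT) with TENSOR_NOT_REQUIRED and, when it is absent from
// the GGUF, fall back to the token embedding with TENSOR_DUPLICATED — i.e. tied
// embeddings, duplicated so the output copy can still be offloaded independently.
// GPT-2 additionally loads a learned absolute position embedding of size n_ctx_train
// and fused QKV projections of width n_embd + 2*n_embd_gqa.
//
// A minimal sketch of that fallback pattern in isolation (assumes the same
// create_tensor/tn helpers and flag semantics as used above):
//
//     ggml_tensor * out = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
//     if (out == NULL) {
//         // tensor absent from the GGUF -> reuse the (tied) token embedding
//         out = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
//     }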
  2245. case LLM_ARCH_CODESHELL:
  2246. {
  2247. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2248. // if tok embd is NULL, init from output
  2249. if (tok_embd == NULL) {
  2250. tok_embd = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2251. }
  2252. // output
  2253. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2254. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2255. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2256. for (int i = 0; i < n_layer; ++i) {
  2257. auto & layer = layers[i];
  2258. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2259. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2260. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  2261. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
  2262. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2263. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  2264. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2265. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  2266. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2267. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  2268. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2269. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  2270. }
  2271. } break;
  2272. case LLM_ARCH_ORION:
  2273. {
  2274. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2275. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2276. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2277. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2278. for (int i = 0; i < n_layer; ++i) {
  2279. auto & layer = layers[i];
  2280. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2281. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2282. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2283. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2284. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2285. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2286. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2287. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  2288. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2289. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2290. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2291. }
  2292. } break;
  2293. case LLM_ARCH_INTERNLM2:
  2294. {
  2295. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2296. // output
  2297. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2298. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2299. for (int i = 0; i < n_layer; ++i) {
  2300. auto & layer = layers[i];
  2301. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2302. // layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  2303. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2304. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2305. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2306. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2307. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2308. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2309. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2310. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2311. }
  2312. } break;
  2313. case LLM_ARCH_GEMMA:
  2314. {
  2315. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2316. // output
  2317. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2318. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
  2319. for (int i = 0; i < n_layer; ++i) {
  2320. auto & layer = layers[i];
  2321. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2322. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2323. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2324. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2325. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  2326. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2327. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2328. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2329. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2330. }
  2331. } break;
  2332. case LLM_ARCH_GEMMA2:
  2333. {
  2334. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2335. // output
  2336. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2337. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
  2338. for (int i = 0; i < n_layer; ++i) {
  2339. auto & layer = layers[i];
  2340. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2341. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2342. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2343. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2344. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  2345. layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
  2346. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2347. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2348. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2349. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2350. layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
  2351. }
  2352. } break;
  2353. case LLM_ARCH_GEMMA3:
  2354. {
  2355. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2356. // output
  2357. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2358. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2359. // if output is NULL, init from the input tok embed
  2360. if (output == NULL) {
  2361. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2362. }
  2363. for (int i = 0; i < n_layer; ++i) {
  2364. auto & layer = layers[i];
  2365. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2366. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2367. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2368. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2369. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  2370. layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
  2371. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
  2372. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
  2373. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2374. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2375. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2376. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2377. layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
  2378. }
  2379. } break;
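// Note on the Gemma3 case above: compared to Gemma2 it adds per-head RMS norms on
// Q and K (attn_q_norm / attn_k_norm, shape {n_embd_head_k}) on top of the
// post-attention and post-FFN norms, and the LM head falls back to the tied token
// embedding when no separate output tensor is present.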
  2380. case LLM_ARCH_STARCODER2:
  2381. {
  2382. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2383. // output
  2384. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2385. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2386. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2387. // if output is NULL, init from the input tok embed
  2388. if (output == NULL) {
  2389. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2390. }
  2391. for (int i = 0; i < n_layer; ++i) {
  2392. auto & layer = layers[i];
  2393. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2394. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2395. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2396. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2397. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2398. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
2399. // bias tensors (loaded as required here: flags == 0)
  2400. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0);
  2401. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0);
  2402. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0);
  2403. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  2404. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2405. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  2406. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2407. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
2408. // FFN bias tensors (also loaded as required here)
  2409. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  2410. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP , "bias", i), { n_ff}, 0);
  2411. }
  2412. } break;
  2413. case LLM_ARCH_MAMBA:
  2414. {
  2415. const int64_t d_conv = hparams.ssm_d_conv;
  2416. const int64_t d_inner = hparams.ssm_d_inner;
  2417. const int64_t d_state = hparams.ssm_d_state;
  2418. const int64_t dt_rank = hparams.ssm_dt_rank;
  2419. // only an expansion factor of 2 is supported for now
  2420. if (2 * n_embd != d_inner) {
  2421. throw std::runtime_error("only an expansion factor of 2 is supported for now");
  2422. }
  2423. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2424. // output
  2425. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2426. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2427. // if output is NULL, init from the input tok embed, duplicated to allow offloading
  2428. if (output == NULL) {
  2429. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2430. }
  2431. for (int i = 0; i < n_layer; ++i) {
  2432. auto & layer = layers[i];
  2433. // norm
  2434. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2435. layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0);
  2436. layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0);
  2437. layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0);
  2438. layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0);
  2439. layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0);
  2440. layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0);
  2441. // no "weight" suffix for these
  2442. layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0);
  2443. layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0);
  2444. // out_proj
  2445. layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
  2446. }
  2447. } break;
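// Note on the Mamba case above: d_conv is the depthwise conv kernel width, d_inner
// the expanded inner width (enforced to be exactly 2*n_embd), d_state the SSM state
// size and dt_rank the rank of the delta-t projection. ssm_a and ssm_d are created
// without a "weight" suffix, matching how those tensors are named in the GGUF.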
  2448. case LLM_ARCH_XVERSE:
  2449. {
  2450. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2451. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2452. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2453. for (int i = 0; i < n_layer; ++i) {
  2454. auto & layer = layers[i];
  2455. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2456. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2457. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2458. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2459. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2460. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2461. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2462. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2463. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2464. }
  2465. } break;
  2466. case LLM_ARCH_COMMAND_R:
  2467. {
  2468. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2469. // output
  2470. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2471. // init output from the input tok embed
  2472. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2473. for (int i = 0; i < n_layer; ++i) {
  2474. auto & layer = layers[i];
  2475. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2476. if (n_layer >= 64) {
  2477. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
  2478. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
  2479. }
  2480. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2481. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2482. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2483. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2484. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2485. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2486. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2487. }
  2488. } break;
  2489. case LLM_ARCH_COHERE2:
  2490. {
  2491. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
  2492. // output
  2493. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
  2494. // init output from the input tok embed
  2495. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab },
  2496. TENSOR_DUPLICATED);
  2497. for (int i = 0; i < n_layer; ++i) {
  2498. auto & layer = layers[i];
  2499. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
  2500. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd }, 0);
  2501. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
  2502. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
  2503. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
  2504. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
  2505. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
  2506. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
  2507. }
  2508. }
  2509. break;
  2510. case LLM_ARCH_OLMO: // adapted from LLM_ARCH_LLAMA with norm params removed
  2511. {
  2512. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2513. // output
  2514. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2515. // if output is NULL, init from the input tok embed
  2516. if (output == NULL) {
  2517. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2518. }
  2519. for (int i = 0; i < n_layer; ++i) {
  2520. auto & layer = layers[i];
  2521. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2522. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2523. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2524. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2525. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2526. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2527. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2528. }
  2529. } break;
  2530. case LLM_ARCH_OLMO2:
  2531. {
  2532. const int64_t n_embd_head = n_embd / n_head;
  2533. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2534. // output
  2535. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2536. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2537. for (int i = 0; i < n_layer; ++i) {
  2538. auto & layer = layers[i];
  2539. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2540. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2541. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2542. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2543. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
  2544. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_head_kv * n_embd_head}, 0);
  2545. layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
  2546. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2547. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2548. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2549. layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
  2550. }
  2551. } break;
  2552. case LLM_ARCH_OLMOE:
  2553. {
  2554. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2555. // output
  2556. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2557. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2558. for (int i = 0; i < n_layer; ++i) {
  2559. auto & layer = layers[i];
  2560. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2561. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2562. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2563. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2564. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2565. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
  2566. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
  2567. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2568. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  2569. if (n_expert == 0) {
  2570. throw std::runtime_error("n_expert must be > 0");
  2571. }
  2572. if (n_expert_used == 0) {
  2573. throw std::runtime_error("n_expert_used must be > 0");
  2574. }
  2575. // MoE branch
  2576. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  2577. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
  2578. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  2579. }
  2580. } break;
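// Note on the OLMoE case above: the router (ffn_gate_inp, {n_embd, n_expert}) selects
// experts whose weights are stored as single 3-D tensors with the expert index as the
// last dimension ({n_embd, n_ff, n_expert} etc.); n_expert and n_expert_used are
// validated to be non-zero before the expert tensors are created.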
  2581. case LLM_ARCH_OPENELM:
  2582. {
  2583. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2584. // output
  2585. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2586. // init output from the input tok embed
  2587. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2588. for (int i = 0; i < n_layer; ++i) {
  2589. const int64_t n_head = hparams.n_head(i);
  2590. const int64_t n_head_qkv = 2*hparams.n_head_kv(i) + n_head;
  2591. const int64_t n_ff = hparams.n_ff(i);
  2592. auto & layer = layers[i];
  2593. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2594. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_head_qkv*n_embd_head_k}, 0);
  2595. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
  2596. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
  2597. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head*n_embd_head_k, n_embd}, 0);
  2598. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2599. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2600. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2601. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2602. }
  2603. } break;
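// Note on the OpenELM case above: head counts and FFN width vary per layer, so
// n_head, n_head_kv and n_ff are re-read from hparams for each i; the fused QKV
// projection is sized (n_head + 2*n_head_kv) * n_embd_head_k accordingly.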
  2604. case LLM_ARCH_GPTNEOX:
  2605. {
  2606. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2607. // output
  2608. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2609. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2610. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2611. for (int i = 0; i < n_layer; ++i) {
  2612. auto & layer = layers[i];
  2613. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2614. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2615. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  2616. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
  2617. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2618. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  2619. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2620. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  2621. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2622. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  2623. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2624. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  2625. }
  2626. } break;
  2627. case LLM_ARCH_ARCTIC:
  2628. {
  2629. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2630. // output
  2631. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2632. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2633. // if output is NULL, init from the input tok embed
  2634. if (output == NULL) {
  2635. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2636. }
  2637. for (int i = 0; i < n_layer; ++i) {
  2638. auto & layer = layers[i];
  2639. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2640. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2641. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2642. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2643. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2644. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2645. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_embd}, 0);
  2646. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_embd, n_embd}, 0);
  2647. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_embd}, 0);
  2648. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  2649. layer.ffn_norm_exps = create_tensor(tn(LLM_TENSOR_FFN_NORM_EXPS, "weight", i), {n_embd}, 0);
2650. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  2651. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
  2652. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
  2653. }
  2654. } break;
  2655. case LLM_ARCH_DEEPSEEK:
  2656. {
  2657. const int64_t n_ff_exp = hparams.n_ff_exp;
  2658. const int64_t n_expert_shared = hparams.n_expert_shared;
  2659. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2660. // output
  2661. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2662. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2663. for (int i = 0; i < n_layer; ++i) {
  2664. auto & layer = layers[i];
  2665. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2666. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2667. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2668. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2669. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2670. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2671. if (i < (int) hparams.n_layer_dense_lead) {
  2672. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2673. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2674. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2675. } else {
  2676. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  2677. if (n_expert == 0) {
  2678. throw std::runtime_error("n_expert must be > 0");
  2679. }
  2680. if (n_expert_used == 0) {
  2681. throw std::runtime_error("n_expert_used must be > 0");
  2682. }
  2683. // MoE branch
  2684. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2685. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
  2686. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2687. // Shared expert branch
  2688. layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  2689. layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0);
  2690. layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  2691. }
  2692. }
  2693. } break;
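// Note on the DeepSeek case above: the first n_layer_dense_lead layers use a dense
// FFN, while the remaining layers use MoE with n_expert routed experts of width
// n_ff_exp plus a shared-expert branch (ffn_*_shexp) of width n_ff_exp * n_expert_shared.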
  2694. case LLM_ARCH_DEEPSEEK2:
  2695. {
  2696. const bool is_lite = (hparams.n_layer == 27);
  2697. const int64_t n_embd_head_qk_rope = hparams.n_rot;
  2698. const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  2699. const int64_t q_lora_rank = hparams.n_lora_q;
  2700. const int64_t kv_lora_rank = hparams.n_lora_kv;
  2701. const int64_t n_ff_exp = hparams.n_ff_exp;
  2702. const int64_t n_expert_shared = hparams.n_expert_shared;
  2703. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2704. // output
  2705. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2706. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2707. for (int i = 0; i < n_layer; ++i) {
  2708. auto & layer = layers[i];
  2709. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2710. if (!is_lite) {
  2711. layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
  2712. }
  2713. layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
  2714. if (!is_lite) {
  2715. layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
  2716. layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0);
  2717. } else {
  2718. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2719. }
  2720. layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
  2721. layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
  2722. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_head * ( n_embd_head_v), n_embd}, 0);
  2723. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2724. if (i < (int) hparams.n_layer_dense_lead) {
  2725. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2726. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2727. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2728. } else {
  2729. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  2730. layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
  2731. if (n_expert == 0) {
  2732. throw std::runtime_error("n_expert must be > 0");
  2733. }
  2734. if (n_expert_used == 0) {
  2735. throw std::runtime_error("n_expert_used must be > 0");
  2736. }
  2737. // MoE branch
  2738. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2739. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
  2740. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  2741. // Shared expert branch
  2742. layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  2743. layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0);
  2744. layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  2745. }
  2746. }
  2747. } break;
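// Note on the DeepSeek2 case above (multi-head latent attention): queries and the
// joint KV are compressed through low-rank projections of rank q_lora_rank and
// kv_lora_rank (the "lite" variant, detected here by n_layer == 27, skips the Q
// compression). Each head's K is split into a RoPE part of size n_rot and a non-RoPE
// part of size n_embd_head_k - n_rot, which is why wkv_a_mqa outputs
// kv_lora_rank + n_embd_head_qk_rope and wkv_b expands back to
// n_head * (n_embd_head_qk_nope + n_embd_head_v).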
  2748. case LLM_ARCH_PLM:
  2749. {
  2750. const int64_t n_embd_head_qk_rope = hparams.n_rot;
  2751. const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  2752. const int64_t kv_lora_rank = hparams.n_lora_kv;
  2753. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2754. // output
  2755. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2756. // output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2757. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2758. for (int i = 0; i < n_layer; ++i) {
  2759. auto & layer = layers[i];
  2760. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2761. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2762. layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
  2763. layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
  2764. layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
  2765. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_head * ( n_embd_head_v), n_embd}, 0);
  2766. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2767. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2768. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2769. }
  2770. } break;
  2771. case LLM_ARCH_BITNET:
  2772. {
  2773. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2774. // output
  2775. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2776. for (int i = 0; i < n_layer; ++i) {
  2777. auto & layer = layers[i];
  2778. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2779. layer.attn_sub_norm = create_tensor(tn(LLM_TENSOR_ATTN_SUB_NORM, "weight", i), {n_embd}, 0);
  2780. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2781. layer.wq_scale = create_tensor(tn(LLM_TENSOR_ATTN_Q, "scale", i), {1}, TENSOR_NOT_REQUIRED);
  2782. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2783. layer.wk_scale = create_tensor(tn(LLM_TENSOR_ATTN_K, "scale", i), {1}, TENSOR_NOT_REQUIRED);
  2784. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2785. layer.wv_scale = create_tensor(tn(LLM_TENSOR_ATTN_V, "scale", i), {1}, TENSOR_NOT_REQUIRED);
  2786. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2787. layer.wo_scale = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale", i), {1}, TENSOR_NOT_REQUIRED);
  2788. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2789. layer.ffn_sub_norm = create_tensor(tn(LLM_TENSOR_FFN_SUB_NORM, "weight", i), {n_ff}, 0);
  2790. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2791. layer.ffn_gate_scale = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale", i), {1}, TENSOR_NOT_REQUIRED);
  2792. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2793. layer.ffn_down_scale = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale", i), {1}, TENSOR_NOT_REQUIRED);
  2794. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2795. layer.ffn_up_scale = create_tensor(tn(LLM_TENSOR_FFN_UP, "scale", i), {1}, TENSOR_NOT_REQUIRED);
  2796. }
  2797. } break;
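// Note on the BitNet case above: each linear projection may carry a companion
// {1}-element "scale" tensor (all marked TENSOR_NOT_REQUIRED), presumably used to
// rescale the low-bit weights, and extra sub-norms (attn_sub_norm / ffn_sub_norm)
// are applied inside the attention and FFN blocks.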
  2798. case LLM_ARCH_T5:
  2799. {
  2800. const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
  2801. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2802. // output
  2803. output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2804. output_norm = create_tensor(tn(LLM_TENSOR_DEC_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2805. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2806. // if output is NULL, init from the input tok embed
  2807. if (output == NULL) {
  2808. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2809. }
  2810. for (int i = 0; i < n_layer; ++i) {
  2811. auto & layer = layers[i];
  2812. layer.attn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM, "weight", i), {n_embd}, 0);
  2813. layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
  2814. layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2815. layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2816. layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2817. layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
  2818. layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
  2819. layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
  2820. layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2821. layer.ffn_up_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2822. layer.attn_norm = create_tensor(tn(LLM_TENSOR_DEC_ATTN_NORM, "weight", i), {n_embd}, 0);
  2823. layer.attn_rel_b = create_tensor(tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
  2824. layer.wq = create_tensor(tn(LLM_TENSOR_DEC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2825. layer.wk = create_tensor(tn(LLM_TENSOR_DEC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2826. layer.wv = create_tensor(tn(LLM_TENSOR_DEC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2827. layer.wo = create_tensor(tn(LLM_TENSOR_DEC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
  2828. layer.attn_norm_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_NORM, "weight", i), {n_embd}, 0);
2829. // this tensor appears to be unused in the HF transformers implementation
  2830. layer.attn_rel_b_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
  2831. layer.wq_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2832. layer.wk_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2833. layer.wv_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2834. layer.wo_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
  2835. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_DEC_FFN_NORM, "weight", i), {n_embd}, 0);
  2836. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_DEC_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
  2837. layer.ffn_down = create_tensor(tn(LLM_TENSOR_DEC_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2838. layer.ffn_up = create_tensor(tn(LLM_TENSOR_DEC_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2839. }
  2840. } break;
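// Note on the T5 case above: the encoder (ENC_*) and decoder (DEC_*) keep separate
// attention/FFN tensor sets, the decoder additionally has cross-attention weights,
// and the relative-position bias tables ({n_head, n_rel_attn_bkts}) as well as the
// FFN gate are optional, since only some T5 variants ship them.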
  2841. case LLM_ARCH_T5ENCODER:
  2842. {
  2843. const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
  2844. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2845. // output
  2846. output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2847. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2848. // if output is NULL, init from the input tok embed
  2849. if (output == NULL) {
  2850. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2851. }
  2852. for (int i = 0; i < n_layer; ++i) {
  2853. auto & layer = layers[i];
  2854. layer.attn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM, "weight", i), {n_embd}, 0);
  2855. layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
  2856. layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2857. layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2858. layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2859. layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
  2860. layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
  2861. layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
  2862. layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2863. layer.ffn_up_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2864. }
  2865. } break;
  2866. case LLM_ARCH_JAIS:
  2867. {
  2868. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2869. // output
  2870. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2871. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2872. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2873. for (int i = 0; i < n_layer; ++i) {
  2874. auto & layer = layers[i];
  2875. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2876. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2877. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
  2878. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
  2879. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2880. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
  2881. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2882. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  2883. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2884. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
  2885. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2886. layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, 0);
  2887. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2888. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0);
  2889. }
  2890. } break;
  2891. case LLM_ARCH_CHATGLM:
  2892. {
  2893. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2894. // output
  2895. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2896. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2897. for (int i = 0; i < n_layer; ++i) {
  2898. auto & layer = layers[i];
  2899. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2900. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2901. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2902. if (layer.wqkv == nullptr) {
  2903. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2904. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2905. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2906. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2907. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2908. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2909. }
  2910. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2911. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2912. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff * 2}, 0);
  2913. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
  2914. }
  2915. } break;
  2916. case LLM_ARCH_GLM4:
  2917. {
  2918. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2919. // output
  2920. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2921. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2922. // if output is NULL, init from the input tok embed
  2923. if (output == NULL) {
  2924. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2925. }
  2926. for (int i = 0; i < n_layer; ++i) {
  2927. auto & layer = layers[i];
  2928. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2929. layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2930. layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2931. if (layer.wqkv == nullptr) {
  2932. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2933. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2934. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2935. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2936. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2937. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2938. }
  2939. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2940. layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
  2941. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2942. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2943. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff * 2}, 0);
  2944. layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
  2945. }
  2946. } break;
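// Note on the ChatGLM/GLM4 cases above: the loader first tries a fused QKV tensor
// (wqkv/bqkv, TENSOR_NOT_REQUIRED) and falls back to separate wq/wk/wv (with optional
// biases) when the fused form is absent. The FFN up projection is sized {n_embd, n_ff * 2}
// because the gate and up halves appear to be packed into a single tensor.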
  2947. case LLM_ARCH_NEMOTRON:
  2948. {
  2949. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2950. // output
  2951. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2952. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  2953. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  2954. for (int i = 0; i < n_layer; ++i) {
  2955. auto & layer = layers[i];
  2956. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2957. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  2958. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  2959. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  2960. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  2961. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  2962. // optional bias tensors
  2963. layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2964. layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2965. layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
  2966. layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2967. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2968. layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
  2969. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2970. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2971. // optional MLP bias
  2972. layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  2973. layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
  2974. }
  2975. } break;
  2976. case LLM_ARCH_EXAONE:
  2977. {
  2978. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  2979. // output
  2980. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  2981. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  2982. // if output is NULL, init from the input tok embed
  2983. if (output == NULL) {
  2984. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  2985. }
  2986. for (int i = 0; i < n_layer; ++i) {
  2987. auto & layer = layers[i];
  2988. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  2989. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
  2990. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
  2991. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
  2992. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
  2993. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  2994. layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
  2995. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  2996. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  2997. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  2998. }
  2999. } break;
  3000. case LLM_ARCH_RWKV6:
  3001. {
  3002. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3003. // Block 0, LN0
  3004. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
  3005. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
  3006. // output
  3007. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3008. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  3009. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3010. const int time_mix_extra_dim = hparams.time_mix_extra_dim;
  3011. const int time_decay_extra_dim = hparams.time_decay_extra_dim;
  3012. const int head_size = hparams.wkv_head_size;
  3013. const int attn_hidden_size = n_embd;
  3014. const int ffn_size = hparams.n_ff_arr[0];
  3015. for (int i = 0; i < n_layer; ++i) {
  3016. auto & layer = layers[i];
  3017. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3018. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  3019. layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
  3020. layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, 0);
  3021. layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
  3022. layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
  3023. layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
  3024. layer.time_mix_lerp_w = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3025. layer.time_mix_lerp_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3026. layer.time_mix_lerp_v = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3027. layer.time_mix_lerp_r = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3028. layer.time_mix_lerp_g = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
  3029. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, TENSOR_NOT_REQUIRED);
  3030. GGML_ASSERT(!(layer.time_mix_lerp_fused == NULL && layer.time_mix_lerp_w == NULL));
  3031. layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, 0);
  3032. layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
  3033. layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
  3034. layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
  3035. layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
  3036. layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3037. layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3038. layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3039. layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
  3040. layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
  3041. layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
  3042. layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
  3043. layer.channel_mix_lerp_r = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, 0);
  3044. layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
  3045. layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
  3046. layer.channel_mix_receptance = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "weight", i), {n_embd, n_embd}, 0);
  3047. }
  3048. } break;
  3049. case LLM_ARCH_RWKV6QWEN2:
  3050. {
  3051. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3052. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3053. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, TENSOR_NOT_REQUIRED);
  3054. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3055. const int time_mix_extra_dim = hparams.time_mix_extra_dim;
  3056. const int time_decay_extra_dim = hparams.time_decay_extra_dim;
  3057. const int head_size = hparams.wkv_head_size;
  3058. const int attn_hidden_size = n_embd;
  3059. const int n_head_kv = hparams.n_head_kv();
  3060. int attn_key_value_size;
  3061. if (n_head_kv == 0 || attn_hidden_size / head_size == n_head_kv) {
  3062. attn_key_value_size = attn_hidden_size;
  3063. } else {
  3064. attn_key_value_size = n_head_kv * head_size;
  3065. }
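// Illustrative arithmetic for the branch above (values assumed, not read from any checkpoint):
// with n_embd = 2048, wkv_head_size = 64 and n_head_kv() = 8, 2048 / 64 = 32 full heads != 8,
// so attn_key_value_size = 8 * 64 = 512 - i.e. the WKV key/value projections created below are
// narrower than n_embd whenever the model uses grouped KV heads.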
  3066. for (int i = 0; i < n_layer; ++i) {
  3067. auto & layer = layers[i];
  3068. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3069. layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
  3070. layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
  3071. layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
  3072. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
  3073. layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, TENSOR_NOT_REQUIRED);
  3074. layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
  3075. layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
  3076. layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
  3077. layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {n_embd, attn_key_value_size}, 0);
  3078. layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {n_embd, attn_key_value_size}, 0);
  3079. layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3080. layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3081. // optional bias tensors
  3082. layer.time_mix_key_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "bias", i), {attn_key_value_size}, TENSOR_NOT_REQUIRED);
  3083. layer.time_mix_value_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "bias", i), {attn_key_value_size}, TENSOR_NOT_REQUIRED);
  3084. layer.time_mix_receptance_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "bias", i), {attn_hidden_size}, TENSOR_NOT_REQUIRED);
  3085. layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
  3086. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3087. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  3088. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3089. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3090. }
  3091. } break;
  3092. case LLM_ARCH_RWKV7:
  3093. {
  3094. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3095. // Block 0, LN0
  3096. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
  3097. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
  3098. // output
  3099. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3100. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  3101. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3102. const int n_lora_decay = hparams.n_lora_decay;
  3103. const int n_lora_iclr = hparams.n_lora_iclr;
  3104. const int n_lora_value_res_mix = hparams.n_lora_value_res_mix;
  3105. const int n_lora_gate = hparams.n_lora_gate;
  3106. const int attn_hidden_size = n_embd;
  3107. const int ffn_size = hparams.n_ff_arr[0];
  3108. for (int i = 0; i < n_layer; ++i) {
  3109. auto & layer = layers[i];
  3110. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3111. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
  3112. layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
  3113. layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, 0);
  3114. layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0);
  3115. layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0);
  3116. layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0);
  3117. layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0);
  3118. layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0);
  3119. layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0);
  3120. if (i == 0) {
3121. // layer 0: present in the checkpoint but not used (note the n_lora_iclr rank here instead of n_lora_value_res_mix)
  3122. layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
  3123. layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0);
  3124. layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0);
  3125. } else {
  3126. layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
  3127. layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0);
  3128. layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0);
  3129. }
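// Shape sketch (sizes assumed, for illustration only): with n_embd = 2048 and
// n_lora_value_res_mix = 32, v1 is {2048, 32} and v2 is {32, 2048} - a low-rank pair whose
// product parameterizes a full 2048x2048 mixing map with ~32x fewer weights, the same
// LoRA-style factorization used by the decay (w1/w2) and iclr (a1/a2) pairs above.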
  3130. layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, 0);
  3131. layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, 0);
  3132. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0);
  3133. layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0);
  3134. layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0);
  3135. layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0);
  3136. layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
  3137. layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3138. layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3139. layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
  3140. layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
  3141. layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
  3142. layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
  3143. layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
  3144. layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
  3145. }
  3146. } break;
  3147. case LLM_ARCH_ARWKV7:
  3148. {
  3149. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3150. // output
  3151. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3152. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3153. const int n_lora_decay = hparams.n_lora_decay;
  3154. const int n_lora_iclr = hparams.n_lora_iclr;
  3155. const int n_lora_value_res_mix = hparams.n_lora_value_res_mix;
  3156. const int n_lora_gate = hparams.n_lora_gate;
  3157. const int attn_hidden_size = n_embd;
  3158. for (int i = 0; i < n_layer; ++i) {
  3159. auto & layer = layers[i];
  3160. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3161. layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0);
  3162. layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0);
  3163. layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0);
  3164. layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0);
  3165. layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0);
  3166. layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0);
  3167. if (i == 0) {
3168. // layer 0: present in the checkpoint but not used (note the n_lora_iclr rank here instead of n_lora_value_res_mix)
  3169. layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
  3170. layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0);
  3171. layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0);
  3172. } else {
  3173. layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
  3174. layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0);
  3175. layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0);
  3176. }
  3177. layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, TENSOR_NOT_REQUIRED);
  3178. layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, TENSOR_NOT_REQUIRED);
  3179. try {
  3180. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0);
3181. } catch (const std::runtime_error &) {
3182. // ARWKV checkpoints without gate tensors store the fused lerp with 5 components instead of 6, so retry with the smaller shape
  3183. layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
  3184. }
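// Hedged note on the fallback: the fused lerp tensor is assumed to pack one per-channel
// coefficient per mixed input, so a checkpoint that omits the gate carries 5 slots instead
// of 6 - which is exactly the {n_embd, 1, 1, 5} shape retried above.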
  3185. layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0);
  3186. layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0);
  3187. layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0);
  3188. layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
  3189. layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3190. layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
  3191. layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
  3192. layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
  3193. layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
  3194. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3195. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  3196. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3197. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3198. }
  3199. } break;
  3200. case LLM_ARCH_CHAMELEON:
  3201. {
  3202. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3203. // output
  3204. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3205. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
  3206. // if output is NULL, init from the input tok embed
  3207. if (output == NULL) {
  3208. output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
  3209. }
  3210. for (int i = 0; i < n_layer; ++i) {
  3211. auto & layer = layers[i];
  3212. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3213. layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
  3214. layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
  3215. layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd_head_k, n_head}, TENSOR_NOT_REQUIRED);
  3216. layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
  3217. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
  3218. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
  3219. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
  3220. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
  3221. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3222. layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
  3223. layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
  3224. layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
  3225. }
  3226. } break;
  3227. case LLM_ARCH_WAVTOKENIZER_DEC:
  3228. {
  3229. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hparams.n_embd_features, n_vocab}, 0);
  3230. conv1d = create_tensor(tn(LLM_TENSOR_CONV1D, "weight"), {7, hparams.n_embd_features, hparams.posnet.n_embd}, 0);
  3231. conv1d_b = create_tensor(tn(LLM_TENSOR_CONV1D, "bias"), {1, hparams.posnet.n_embd}, 0);
  3232. // posnet
  3233. {
  3234. const int64_t n_embd = hparams.posnet.n_embd;
  3235. for (uint32_t i = 0; i < hparams.posnet.n_layer; ++i) {
  3236. auto & layer = layers[i].posnet;
  3237. // posnet:
  3238. //
  3239. // - resnet
  3240. // - resnet
  3241. // - attn
  3242. // - resnet
  3243. // - resnet
  3244. // - norm
  3245. //
  3246. switch (i) {
  3247. case 0:
  3248. case 1:
  3249. case 3:
  3250. case 4:
  3251. {
  3252. layer.norm1 = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "weight", i), {1, n_embd}, 0);
  3253. layer.norm1_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "bias", i), {1, n_embd}, 0);
  3254. layer.conv1 = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "weight", i), {3, n_embd, n_embd}, 0);
  3255. layer.conv1_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "bias", i), {1, n_embd}, 0);
  3256. layer.norm2 = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "weight", i), {1, n_embd}, 0);
  3257. layer.norm2_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "bias", i), {1, n_embd}, 0);
  3258. layer.conv2 = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "weight", i), {3, n_embd, n_embd}, 0);
  3259. layer.conv2_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "bias", i), {1, n_embd}, 0);
  3260. } break;
  3261. case 2:
  3262. {
  3263. layer.attn_norm = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
  3264. layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias", i), {1, n_embd}, 0);
  3265. layer.attn_q = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q, "weight", i), {1, n_embd, n_embd}, 0);
  3266. layer.attn_q_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q, "bias", i), {1, n_embd}, 0);
  3267. layer.attn_k = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K, "weight", i), {1, n_embd, n_embd}, 0);
  3268. layer.attn_k_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K, "bias", i), {1, n_embd}, 0);
  3269. layer.attn_v = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V, "weight", i), {1, n_embd, n_embd}, 0);
  3270. layer.attn_v_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V, "bias", i), {1, n_embd}, 0);
  3271. layer.attn_o = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT, "weight", i), {1, n_embd, n_embd}, 0);
  3272. layer.attn_o_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT, "bias", i), {1, n_embd}, 0);
  3273. } break;
  3274. case 5:
  3275. {
  3276. layer.norm = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
  3277. layer.norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias", i), {1, n_embd}, 0);
  3278. } break;
  3279. default: GGML_ABORT("unknown posnet layer");
3280. }
  3281. }
  3282. }
  3283. GGML_ASSERT(hparams.posnet.n_embd == hparams.convnext.n_embd);
  3284. tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {hparams.posnet.n_embd}, 0);
  3285. tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {hparams.posnet.n_embd}, 0);
  3286. // convnext
  3287. {
  3288. const int64_t n_embd = hparams.convnext.n_embd;
  3289. for (uint32_t i = 0; i < hparams.convnext.n_layer; ++i) {
  3290. auto & layer = layers[i].convnext;
  3291. layer.dw = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW, "weight", i), {7, 1, n_embd}, 0);
  3292. layer.dw_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW, "bias", i), {1, n_embd}, 0);
  3293. layer.norm = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM, "weight", i), {n_embd}, 0);
  3294. layer.norm_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM, "bias", i), {n_embd}, 0);
  3295. layer.pw1 = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1, "weight", i), {n_embd, n_ff}, 0);
  3296. layer.pw1_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1, "bias", i), {n_ff}, 0);
  3297. layer.pw2 = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2, "weight", i), {n_ff, n_embd}, 0);
  3298. layer.pw2_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2, "bias", i), {n_embd}, 0);
  3299. layer.gamma = create_tensor(tn(LLM_TENSOR_CONVNEXT_GAMMA, "weight", i), {n_embd}, 0);
  3300. }
  3301. // output
  3302. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3303. output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
  3304. }
  3305. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hparams.convnext.n_embd, n_embd}, 0);
  3306. output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), {n_embd}, 0);
  3307. } break;
  3308. case LLM_ARCH_BAILINGMOE:
  3309. {
  3310. const int64_t n_ff_exp = hparams.n_ff_exp;
  3311. const int64_t n_expert_shared = hparams.n_expert_shared;
  3312. tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
  3313. // output
  3314. output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
  3315. output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
  3316. for (int i = 0; i < n_layer; ++i) {
  3317. auto & layer = layers[i];
  3318. layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
  3319. layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_head * n_rot}, 0);
  3320. layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_head_kv * n_rot}, 0);
  3321. layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_head_kv * n_rot}, 0);
  3322. layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_rot, n_embd}, 0);
  3323. layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
  3324. layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
  3325. if (n_expert == 0) {
  3326. throw std::runtime_error("n_expert must be > 0");
  3327. }
  3328. if (n_expert_used == 0) {
  3329. throw std::runtime_error("n_expert_used must be > 0");
  3330. }
  3331. layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  3332. layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
  3333. layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
  3334. layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  3335. layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0);
  3336. layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
  3337. }
  3338. } break;
  3339. default:
  3340. throw std::runtime_error("unknown architecture");
  3341. }
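// Hedged reminder, inferred from how the results are used in this function rather than from
// the flag definitions: TENSOR_NOT_REQUIRED makes create_tensor() return nullptr instead of
// throwing when a tensor is missing (hence the "if (output == NULL)" fallbacks above), and
// TENSOR_DUPLICATED marks a tensor that reuses another tensor's data, e.g. tok_embd doubling
// as the output head.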
  3342. if (n_moved_tensors > 0) {
  3343. LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %d others) cannot be used with preferred buffer type %s, using %s instead\n",
  3344. __func__, first_moved_tensor->name, ggml_type_name(first_moved_tensor->type), n_moved_tensors - 1,
  3345. ggml_backend_buft_name(first_moved_from_buft), ggml_backend_buft_name(first_moved_to_buft));
  3346. }
  3347. }
  3348. ml.done_getting_tensors();
  3349. ml.init_mappings(true, use_mlock ? &pimpl->mlock_mmaps : nullptr);
  3350. pimpl->mappings.reserve(ml.mappings.size());
  3351. // create the backend buffers
  3352. std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_bufs;
  3353. ctx_bufs.reserve(ctx_map.size());
3354. // ensure there is enough capacity for the maximum number of backend buffers we might create
  3355. const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
  3356. pimpl->bufs.reserve(n_max_backend_buffer);
  3357. for (auto & it : ctx_map) {
  3358. ggml_backend_buffer_type_t buft = it.first;
  3359. ggml_context * ctx = it.second;
  3360. // skip contexts without tensors
  3361. if (ggml_get_first_tensor(ctx) == nullptr) {
  3362. continue;
  3363. }
  3364. llama_buf_map buf_map;
  3365. buf_map.reserve(n_max_backend_buffer);
  3366. // check if it is possible to use buffer_from_host_ptr with this buffer type
  3367. ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
  3368. if (!dev) {
  3369. // FIXME: workaround for CPU backend buft having a NULL device
  3370. dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
  3371. }
  3372. ggml_backend_dev_props props;
  3373. ggml_backend_dev_get_props(dev, &props);
  3374. bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
  3375. bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
  3376. if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
  3377. for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
  3378. // only the mmap region containing the tensors in the model is mapped to the backend buffer
  3379. // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
  3380. // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
  3381. void * addr = nullptr;
  3382. size_t first, last; // NOLINT
  3383. ml.get_mapping_range(&first, &last, &addr, idx, ctx);
  3384. if (first >= last) {
  3385. continue;
  3386. }
  3387. const size_t max_size = ggml_get_max_tensor_size(ctx);
  3388. ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
  3389. if (buf == nullptr) {
  3390. throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
  3391. }
  3392. pimpl->bufs.emplace_back(buf);
  3393. buf_map.emplace(idx, buf);
  3394. }
  3395. }
  3396. else {
  3397. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
  3398. if (buf == nullptr) {
  3399. throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
  3400. }
  3401. pimpl->bufs.emplace_back(buf);
  3402. if (use_mlock && ggml_backend_buffer_is_host(buf)) {
  3403. pimpl->mlock_bufs.emplace_back(new llama_mlock);
  3404. auto & mlock_buf = pimpl->mlock_bufs.back();
  3405. mlock_buf->init (ggml_backend_buffer_get_base(buf));
  3406. mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
  3407. }
  3408. for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
  3409. buf_map.emplace(idx, buf);
  3410. }
  3411. }
  3412. if (pimpl->bufs.empty()) {
  3413. throw std::runtime_error("failed to allocate buffer");
  3414. }
  3415. for (auto & buf : buf_map) {
  3416. // indicate that this buffer contains weights
  3417. // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
  3418. ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
  3419. }
  3420. ctx_bufs.emplace_back(ctx, buf_map);
  3421. }
  3422. if (llama_supports_gpu_offload()) {
  3423. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  3424. LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  3425. if (n_gpu_layers > (int) hparams.n_layer) {
  3426. LLAMA_LOG_INFO("%s: offloading output layer to GPU\n", __func__);
  3427. }
  3428. const int max_backend_supported_layers = hparams.n_layer + 1;
  3429. const int max_offloadable_layers = hparams.n_layer + 1;
  3430. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
  3431. }
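// Worked example (layer counts assumed): with hparams.n_layer = 32, requesting n_gpu_layers = 20
// logs "offloading 20 repeating layers to GPU" and "offloaded 20/33 layers to GPU"; requesting
// 33 or more also offloads the output layer and logs "offloaded 33/33 layers to GPU".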
  3432. // print memory requirements per buffer type
  3433. for (auto & buf : pimpl->bufs) {
  3434. LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
  3435. }
  3436. // populate tensors_by_name
  3437. for (auto & ctx : pimpl->ctxs) {
  3438. for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
  3439. tensors_by_name.emplace_back(ggml_get_name(cur), cur);
  3440. }
  3441. }
  3442. // load tensor data
  3443. for (auto & it : ctx_bufs) {
  3444. ggml_context * ctx = it.first;
  3445. auto & bufs = it.second;
  3446. if (!ml.load_all_data(ctx, bufs, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
  3447. return false;
  3448. }
  3449. }
  3450. if (use_mmap_buffer) {
  3451. for (auto & mapping : ml.mappings) {
  3452. pimpl->mappings.emplace_back(std::move(mapping));
  3453. }
  3454. }
  3455. return true;
  3456. }
  3457. std::string llama_model::arch_name() const {
  3458. return llm_arch_name(arch);
  3459. }
  3460. std::string llama_model::type_name() const {
  3461. return llm_type_name(type);
  3462. }
  3463. std::string llama_model::desc() const {
  3464. return pimpl->desc_str;
  3465. }
  3466. size_t llama_model::size() const {
  3467. return pimpl->n_bytes;
  3468. }
  3469. size_t llama_model::n_tensors() const {
  3470. return tensors_by_name.size();
  3471. }
  3472. size_t llama_model::n_devices() const {
  3473. return devices.size();
  3474. }
  3475. uint64_t llama_model::n_elements() const {
  3476. return pimpl->n_elements;
  3477. }
  3478. void llama_model::print_info() const {
  3479. const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
  3480. auto print_f = [](const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
  3481. bool is_var = false;
  3482. std::vector<uint32_t> v;
  3483. for (uint32_t i = 0; i < n; ++i) {
  3484. v.push_back(f(i));
  3485. if (v[i] != v[0]) {
  3486. is_var = true;
  3487. }
  3488. }
  3489. std::stringstream ss;
  3490. if (is_var) {
  3491. ss << "[";
  3492. for (uint32_t i = 0; i < n; ++i) {
  3493. ss << v[i];
  3494. if (i < n - 1) {
  3495. ss << ", ";
  3496. }
  3497. }
  3498. ss << "]";
  3499. } else {
  3500. ss << v[0];
  3501. }
  3502. return ss.str();
  3503. };
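// Example of the helper's output (values invented for illustration): if n_head(il) == 32 for
// every layer it prints "32"; if the layers differ, e.g. 32, 32, 8, it prints "[32, 32, 8]".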
  3504. // hparams
  3505. LLAMA_LOG_INFO("%s: arch = %s\n", __func__, arch_name().c_str());
  3506. LLAMA_LOG_INFO("%s: vocab_only = %d\n", __func__, hparams.vocab_only);
  3507. if (!hparams.vocab_only) {
  3508. LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
  3509. LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
  3510. LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
  3511. LLAMA_LOG_INFO("%s: n_head = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head(il); }, hparams.n_layer).c_str());
  3512. LLAMA_LOG_INFO("%s: n_head_kv = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
  3513. LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot);
  3514. LLAMA_LOG_INFO("%s: n_swa = %u\n", __func__, hparams.n_swa);
  3515. LLAMA_LOG_INFO("%s: n_swa_pattern = %u\n", __func__, hparams.n_swa_pattern);
  3516. LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k);
  3517. LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v);
  3518. LLAMA_LOG_INFO("%s: n_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il); }, hparams.n_layer).c_str());
  3519. LLAMA_LOG_INFO("%s: n_embd_k_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
  3520. LLAMA_LOG_INFO("%s: n_embd_v_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
  3521. LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
  3522. LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
  3523. LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
  3524. LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
  3525. LLAMA_LOG_INFO("%s: f_logit_scale = %.1e\n", __func__, hparams.f_logit_scale);
  3526. LLAMA_LOG_INFO("%s: f_attn_scale = %.1e\n", __func__, hparams.f_attention_scale);
  3527. LLAMA_LOG_INFO("%s: n_ff = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
  3528. LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
  3529. LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
  3530. LLAMA_LOG_INFO("%s: causal attn = %d\n", __func__, hparams.causal_attn);
  3531. LLAMA_LOG_INFO("%s: pooling type = %d\n", __func__, hparams.pooling_type);
  3532. LLAMA_LOG_INFO("%s: rope type = %d\n", __func__, hparams.rope_type);
  3533. LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type);
  3534. LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
  3535. LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
  3536. LLAMA_LOG_INFO("%s: n_ctx_orig_yarn = %u\n", __func__, hparams.n_ctx_orig_yarn);
  3537. LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
  3538. LLAMA_LOG_INFO("%s: ssm_d_conv = %u\n", __func__, hparams.ssm_d_conv);
  3539. LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
  3540. LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state);
  3541. LLAMA_LOG_INFO("%s: ssm_dt_rank = %u\n", __func__, hparams.ssm_dt_rank);
  3542. LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms = %d\n", __func__, hparams.ssm_dt_b_c_rms);
  3543. }
  3544. LLAMA_LOG_INFO("%s: model type = %s\n", __func__, type_name().c_str());
  3545. if (pimpl->n_elements >= 1e12) {
  3546. LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, pimpl->n_elements*1e-12);
  3547. } else if (pimpl->n_elements >= 1e9) {
  3548. LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, pimpl->n_elements*1e-9);
  3549. } else if (pimpl->n_elements >= 1e6) {
  3550. LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, pimpl->n_elements*1e-6);
  3551. } else {
  3552. LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, pimpl->n_elements*1e-3);
  3553. }
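// Worked example of the branches above (element counts assumed): 8.03e9 parameters print as
// "model params = 8.03 B", 1.5e12 as "1.50 T", and 125e6 as "125.00 M".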
  3554. // general kv
  3555. LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, name.c_str());
  3556. if (arch == LLM_ARCH_DEEPSEEK) {
  3557. LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
  3558. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3559. LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
  3560. LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
  3561. }
  3562. if (arch == LLM_ARCH_DEEPSEEK2) {
  3563. LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
  3564. LLAMA_LOG_INFO("%s: n_lora_q = %d\n", __func__, hparams.n_lora_q);
  3565. LLAMA_LOG_INFO("%s: n_lora_kv = %d\n", __func__, hparams.n_lora_kv);
  3566. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3567. LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
  3568. LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
  3569. LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm);
  3570. LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
  3571. LLAMA_LOG_INFO("%s: rope_yarn_log_mul = %.4f\n", __func__, hparams.rope_yarn_log_mul);
  3572. }
  3573. if (arch == LLM_ARCH_QWEN2MOE) {
  3574. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3575. LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp);
  3576. }
  3577. if (arch == LLM_ARCH_QWEN3MOE) {
  3578. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3579. }
  3580. if (arch == LLM_ARCH_MINICPM || arch == LLM_ARCH_GRANITE || arch == LLM_ARCH_GRANITE_MOE) {
  3581. LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
  3582. LLAMA_LOG_INFO("%s: f_residual_scale = %f\n", __func__, hparams.f_residual_scale);
  3583. LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
  3584. }
  3585. if (arch == LLM_ARCH_BAILINGMOE) {
  3586. LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
  3587. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  3588. LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
  3589. LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
  3590. LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm);
  3591. }
  3592. vocab.print_info();
  3593. }
  3594. ggml_backend_dev_t llama_model::dev_layer(int il) const {
  3595. return pimpl->dev_layer.at(il).dev;
  3596. }
  3597. ggml_backend_dev_t llama_model::dev_output() const {
  3598. return pimpl->dev_output.dev;
  3599. }
  3600. template<typename F>
  3601. static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) {
  3602. ggml_init_params params = {
  3603. /*.mem_size =*/ ggml_tensor_overhead()*8,
  3604. /*.mem_buffer =*/ NULL,
  3605. /*.no_alloc =*/ true,
  3606. };
  3607. ggml_context_ptr ctx { ggml_init(params) };
  3608. if (!ctx) {
  3609. throw std::runtime_error(format("failed to create ggml context"));
  3610. }
  3611. ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) };
  3612. ggml_tensor * op_tensor = fn(ctx.get());
  3613. for (int i = 0; i < GGML_MAX_SRC; i++) {
  3614. if (op_tensor->src[i] != nullptr) {
  3615. assert(op_tensor->src[i]->buffer == nullptr);
  3616. op_tensor->src[i]->buffer = buf.get();
  3617. }
  3618. }
  3619. bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
  3620. return op_supported;
  3621. }
  3622. template<typename F>
  3623. static ggml_backend_buffer_type_t select_buft(const buft_list_t & buft_list, const F & fn) {
  3624. for (const auto & cur : buft_list) {
  3625. ggml_backend_dev_t cur_dev = cur.first;
  3626. ggml_backend_buffer_type_t cur_buft = cur.second;
  3627. if (buft_supported(cur_buft, cur_dev, fn)) {
  3628. return cur_buft;
  3629. }
  3630. }
  3631. throw std::runtime_error(format("no suitable buffer type found"));
  3632. }
  3633. ggml_backend_buffer_type_t llama_model::select_buft(int il) const {
  3634. return ::select_buft(
  3635. *pimpl->dev_layer.at(il).buft_list,
  3636. [&](ggml_context * ctx) {
  3637. ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
  3638. ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
  3639. return ggml_add(ctx, cur, layer_dir);
  3640. });
  3641. }
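// Usage sketch (caller-side, using only the member declared above):
//   ggml_backend_buffer_type_t buft = model.select_buft(il);
// walks the layer's buffer-type list and returns the first type whose device reports support
// for a plain f32 add of two n_embd-sized tensors - a cheap stand-in for "this backend can
// host this layer", probed with no_alloc so nothing is actually allocated.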
  3642. bool llama_model::has_tensor_overrides() const {
  3643. return pimpl->has_tensor_overrides;
  3644. }
  3645. const ggml_tensor * llama_model::get_tensor(const char * name) const {
  3646. auto it = std::find_if(tensors_by_name.begin(), tensors_by_name.end(),
  3647. [name](const std::pair<std::string, ggml_tensor *> & it) {
  3648. return it.first == name;
  3649. });
  3650. if (it == tensors_by_name.end()) {
  3651. return nullptr;
  3652. }
  3653. return it->second;
  3654. }
  3655. struct llm_build_llama : public llm_graph_context {
  3656. llm_build_llama(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  3657. const int64_t n_embd_head = hparams.n_embd_head_v;
  3658. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  3659. GGML_ASSERT(n_embd_head == hparams.n_rot);
  3660. ggml_tensor * cur;
  3661. ggml_tensor * inpL;
  3662. inpL = build_inp_embd(model.tok_embd);
  3663. // inp_pos - contains the positions
  3664. ggml_tensor * inp_pos = build_inp_pos();
  3665. // temperature tuning
  3666. ggml_tensor * inp_attn_scale = nullptr;
  3667. if (arch == LLM_ARCH_LLAMA4) {
  3668. inp_attn_scale = build_inp_attn_scale();
  3669. }
  3670. auto * inp_attn = build_attn_inp_kv_unified();
  3671. const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
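// Illustrative arithmetic (head size assumed): with n_embd_head = 128 and f_attention_scale
// left at 0.0f, kq_scale = 1/sqrtf(128) ≈ 0.0884f; models that ship a non-zero
// f_attention_scale (e.g. Granite) override the 1/sqrt(d) default.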
  3672. for (int il = 0; il < n_layer; ++il) {
  3673. ggml_tensor * inpSA = inpL;
  3674. bool use_rope = arch == LLM_ARCH_LLAMA4
  3675. ? (il + 1) % hparams.n_no_rope_layer_step != 0
  3676. : true;
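// Illustrative example (hparams value assumed, not taken from a real config): if
// hparams.n_no_rope_layer_step == 4, Llama 4 skips RoPE on layers with (il + 1) % 4 == 0,
// i.e. il = 3, 7, 11, ...; every other architecture reaching this builder keeps use_rope = true.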
  3677. // norm
  3678. cur = build_norm(inpL,
  3679. model.layers[il].attn_norm, NULL,
  3680. LLM_NORM_RMS, il);
  3681. cb(cur, "attn_norm", il);
  3682. // self-attention
  3683. {
  3684. // rope freq factors for llama3; may return nullptr for llama2 and other models
  3685. ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
  3686. // compute Q and K and RoPE them
  3687. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  3688. cb(Qcur, "Qcur", il);
  3689. if (model.layers[il].bq) {
  3690. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  3691. cb(Qcur, "Qcur", il);
  3692. }
  3693. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  3694. cb(Kcur, "Kcur", il);
  3695. if (model.layers[il].bk) {
  3696. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  3697. cb(Kcur, "Kcur", il);
  3698. }
  3699. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  3700. cb(Vcur, "Vcur", il);
  3701. if (model.layers[il].bv) {
  3702. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  3703. cb(Vcur, "Vcur", il);
  3704. }
  3705. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3706. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  3707. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  3708. if (use_rope) {
  3709. Qcur = ggml_rope_ext(
  3710. ctx0, Qcur, inp_pos, rope_factors,
  3711. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  3712. ext_factor, attn_factor, beta_fast, beta_slow
  3713. );
  3714. Kcur = ggml_rope_ext(
  3715. ctx0, Kcur, inp_pos, rope_factors,
  3716. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  3717. ext_factor, attn_factor, beta_fast, beta_slow
  3718. );
  3719. } else if (inp_attn_scale) {
  3720. Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
  3721. }
  3722. cb(Qcur, "Qcur", il);
  3723. cb(Kcur, "Kcur", il);
  3724. cb(Vcur, "Vcur", il);
  3725. if (arch == LLM_ARCH_LLAMA4 && use_rope && hparams.use_kq_norm) {
  3726. // Llama4TextL2Norm
  3727. Qcur = ggml_rms_norm(ctx0, Qcur, hparams.f_norm_rms_eps);
  3728. Kcur = ggml_rms_norm(ctx0, Kcur, hparams.f_norm_rms_eps);
  3729. cb(Qcur, "Qcur_normed", il);
  3730. cb(Kcur, "Kcur_normed", il);
  3731. }
  3732. cur = build_attn(inp_attn, gf,
  3733. model.layers[il].wo, model.layers[il].bo,
  3734. Qcur, Kcur, Vcur, nullptr, kq_scale, il);
  3735. cb(cur, "attn_out", il);
  3736. }
  3737. if (il == n_layer - 1) {
  3738. // skip computing output for unused tokens
  3739. ggml_tensor * inp_out_ids = build_inp_out_ids();
  3740. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  3741. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  3742. }
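// Hedged note: on the final layer only the rows selected by inp_out_ids are kept (typically
// just the last token of each sequence when generating), so the output norm and lm_head below
// operate on far fewer rows than n_tokens during decode.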
  3743. // For Granite architecture
  3744. if (hparams.f_residual_scale) {
  3745. cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
  3746. }
  3747. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3748. cb(ffn_inp, "ffn_inp", il);
  3749. // feed-forward network (non-MoE)
  3750. if (model.layers[il].ffn_gate_inp == nullptr) {
  3751. cur = build_norm(ffn_inp,
  3752. model.layers[il].ffn_norm, NULL,
  3753. LLM_NORM_RMS, il);
  3754. cb(cur, "ffn_norm", il);
  3755. cur = build_ffn(cur,
  3756. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  3757. model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
  3758. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  3759. NULL,
  3760. LLM_FFN_SILU, LLM_FFN_PAR, il);
  3761. cb(cur, "ffn_out", il);
  3762. } else if (arch == LLM_ARCH_LLAMA4) {
  3763. // llama4 MoE
  3764. ggml_tensor * ffn_inp_normed = build_norm(ffn_inp,
  3765. model.layers[il].ffn_norm, NULL,
  3766. LLM_NORM_RMS, il);
3767. cb(ffn_inp_normed, "ffn_norm", il);
  3768. ggml_tensor * moe_out = build_moe_ffn(ffn_inp_normed,
  3769. model.layers[il].ffn_gate_inp,
  3770. model.layers[il].ffn_up_exps,
  3771. model.layers[il].ffn_gate_exps,
  3772. model.layers[il].ffn_down_exps,
  3773. nullptr,
  3774. n_expert, n_expert_used,
  3775. LLM_FFN_SILU, false,
  3776. false, 0.0,
  3777. LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID,
  3778. il);
  3779. // Shared experts
  3780. ggml_tensor * shexp_out = build_ffn(ffn_inp_normed,
  3781. model.layers[il].ffn_up_shexp, NULL, NULL,
  3782. model.layers[il].ffn_gate_shexp, NULL, NULL,
  3783. model.layers[il].ffn_down_shexp, NULL, NULL,
  3784. NULL,
  3785. LLM_FFN_SILU, LLM_FFN_PAR, il);
  3786. cb(shexp_out, "ffn_moe_shexp", il);
  3787. cur = ggml_add(ctx0, moe_out, shexp_out);
  3788. cb(cur, "ffn_moe_out_merged", il);
  3789. } else {
  3790. // MoE branch
  3791. cur = build_norm(ffn_inp,
  3792. model.layers[il].ffn_norm, NULL,
  3793. LLM_NORM_RMS, il);
  3794. cb(cur, "ffn_norm", il);
  3795. cur = build_moe_ffn(cur,
  3796. model.layers[il].ffn_gate_inp,
  3797. model.layers[il].ffn_up_exps,
  3798. model.layers[il].ffn_gate_exps,
  3799. model.layers[il].ffn_down_exps,
  3800. nullptr,
  3801. n_expert, n_expert_used,
  3802. LLM_FFN_SILU, true,
  3803. false, 0.0,
  3804. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  3805. il);
  3806. cb(cur, "ffn_moe_out", il);
  3807. }
  3808. // For Granite architecture
  3809. if (hparams.f_residual_scale) {
  3810. cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
  3811. }
  3812. cur = ggml_add(ctx0, cur, ffn_inp);
  3813. cb(cur, "ffn_out", il);
  3814. cur = build_cvec(cur, il);
  3815. cb(cur, "l_out", il);
  3816. // input for next layer
  3817. inpL = cur;
  3818. }
  3819. cur = inpL;
  3820. cur = build_norm(cur,
  3821. model.output_norm, NULL,
  3822. LLM_NORM_RMS, -1);
  3823. cb(cur, "result_norm", -1);
  3824. res->t_embd = cur;
  3825. // lm_head
  3826. cur = build_lora_mm(model.output, cur);
  3827. // For Granite architecture
  3828. if (hparams.f_logit_scale) {
  3829. cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
  3830. }
  3831. cb(cur, "result_output", -1);
  3832. res->t_logits = cur;
  3833. ggml_build_forward_expand(gf, cur);
  3834. }
  3835. };
  3836. struct llm_build_deci : public llm_graph_context {
  3837. llm_build_deci(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  3838. const int64_t n_embd_head = hparams.n_embd_head_v;
  3839. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  3840. GGML_ASSERT(n_embd_head == hparams.n_rot);
  3841. ggml_tensor * cur;
  3842. ggml_tensor * inpL;
  3843. inpL = build_inp_embd(model.tok_embd);
  3844. // inp_pos - contains the positions
  3845. ggml_tensor * inp_pos = build_inp_pos();
  3846. auto * inp_attn = build_attn_inp_kv_unified();
  3847. const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
  3848. for (int il = 0; il < n_layer; ++il) {
  3849. ggml_tensor * inpSA = inpL;
  3850. const int64_t n_head_kv = hparams.n_head_kv(il);
  3851. const int64_t n_head = hparams.n_head(il);
  3852. if (n_head == 0) {
  3853. // attention-free layer of Llama-3_1-Nemotron-51B
  3854. cur = inpL;
  3855. } else {
  3856. // norm
  3857. cur = build_norm(inpL,
  3858. model.layers[il].attn_norm, NULL,
  3859. LLM_NORM_RMS, il);
  3860. cb(cur, "attn_norm", il);
  3861. }
  3862. if (n_head > 0 && n_head_kv == 0) {
  3863. // "linear attention" of Llama-3_1-Nemotron-51B
  3864. cur = build_lora_mm(model.layers[il].wo, cur);
  3865. cb(cur, "wo", il);
  3866. } else if (n_head > 0) {
  3867. // self-attention
  3868. // rope freq factors for llama3; may return nullptr for llama2 and other models
  3869. ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
  3870. // compute Q and K and RoPE them
  3871. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  3872. cb(Qcur, "Qcur", il);
  3873. if (model.layers[il].bq) {
  3874. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  3875. cb(Qcur, "Qcur", il);
  3876. }
  3877. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  3878. cb(Kcur, "Kcur", il);
  3879. if (model.layers[il].bk) {
  3880. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  3881. cb(Kcur, "Kcur", il);
  3882. }
  3883. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  3884. cb(Vcur, "Vcur", il);
  3885. if (model.layers[il].bv) {
  3886. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  3887. cb(Vcur, "Vcur", il);
  3888. }
  3889. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3890. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  3891. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  3892. Qcur = ggml_rope_ext(
  3893. ctx0, Qcur, inp_pos, rope_factors,
  3894. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  3895. ext_factor, attn_factor, beta_fast, beta_slow
  3896. );
  3897. Kcur = ggml_rope_ext(
  3898. ctx0, Kcur, inp_pos, rope_factors,
  3899. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  3900. ext_factor, attn_factor, beta_fast, beta_slow
  3901. );
  3902. cb(Qcur, "Qcur", il);
  3903. cb(Kcur, "Kcur", il);
  3904. cb(Vcur, "Vcur", il);
  3905. cur = build_attn(inp_attn, gf,
  3906. model.layers[il].wo, model.layers[il].bo,
  3907. Qcur, Kcur, Vcur, nullptr, kq_scale, il);
  3908. }
  3909. if (il == n_layer - 1) {
  3910. // skip computing output for unused tokens
  3911. ggml_tensor * inp_out_ids = build_inp_out_ids();
  3912. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  3913. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  3914. }
  3915. // For Granite architecture
  3916. if (hparams.f_residual_scale) {
  3917. cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
  3918. }
  3919. // modified to support attention-free layer of Llama-3_1-Nemotron-51B
  3920. ggml_tensor * ffn_inp = cur;
  3921. if (n_head > 0) {
  3922. ffn_inp = ggml_add(ctx0, cur, inpSA);
  3923. cb(ffn_inp, "ffn_inp", il);
  3924. }
  3925. // feed-forward network
  3926. if (model.layers[il].ffn_gate_inp == nullptr) {
  3927. cur = build_norm(ffn_inp,
  3928. model.layers[il].ffn_norm, NULL,
  3929. LLM_NORM_RMS, il);
  3930. cb(cur, "ffn_norm", il);
  3931. cur = build_ffn(cur,
  3932. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  3933. model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
  3934. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  3935. NULL,
  3936. LLM_FFN_SILU, LLM_FFN_PAR, il);
  3937. cb(cur, "ffn_out", il);
  3938. }
  3939. // For Granite architecture
  3940. if (hparams.f_residual_scale) {
  3941. cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
  3942. }
  3943. cur = ggml_add(ctx0, cur, ffn_inp);
  3944. cb(cur, "ffn_out", il);
  3945. cur = build_cvec(cur, il);
  3946. cb(cur, "l_out", il);
  3947. // input for next layer
  3948. inpL = cur;
  3949. }
  3950. cur = inpL;
  3951. cur = build_norm(cur,
  3952. model.output_norm, NULL,
  3953. LLM_NORM_RMS, -1);
  3954. cb(cur, "result_norm", -1);
  3955. res->t_embd = cur;
  3956. // lm_head
  3957. cur = build_lora_mm(model.output, cur);
  3958. // For Granite architecture
  3959. if (hparams.f_logit_scale) {
  3960. cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
  3961. }
  3962. cb(cur, "result_output", -1);
  3963. res->t_logits = cur;
  3964. ggml_build_forward_expand(gf, cur);
  3965. }
  3966. };
  3967. struct llm_build_baichuan : public llm_graph_context {
  3968. llm_build_baichuan(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  3969. const int64_t n_embd_head = hparams.n_embd_head_v;
  3970. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  3971. GGML_ASSERT(n_embd_head == hparams.n_rot);
  3972. ggml_tensor * cur;
  3973. ggml_tensor * inpL;
  3974. inpL = build_inp_embd(model.tok_embd);
  3975. // inp_pos - contains the positions
  3976. ggml_tensor * inp_pos = model.type == LLM_TYPE_7B ? build_inp_pos() : nullptr;
  3977. auto * inp_attn = build_attn_inp_kv_unified();
  3978. for (int il = 0; il < n_layer; ++il) {
  3979. ggml_tensor * inpSA = inpL;
  3980. cur = build_norm(inpL,
  3981. model.layers[il].attn_norm, NULL,
  3982. LLM_NORM_RMS, il);
  3983. cb(cur, "attn_norm", il);
  3984. // self-attention
  3985. {
  3986. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  3987. cb(Qcur, "Qcur", il);
  3988. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  3989. cb(Kcur, "Kcur", il);
  3990. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  3991. cb(Vcur, "Vcur", il);
  3992. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3993. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  3994. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  3995. switch (model.type) {
  3996. case LLM_TYPE_7B:
  3997. Qcur = ggml_rope_ext(
  3998. ctx0, Qcur, inp_pos, nullptr,
  3999. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4000. ext_factor, attn_factor, beta_fast, beta_slow
  4001. );
  4002. Kcur = ggml_rope_ext(
  4003. ctx0, Kcur, inp_pos, nullptr,
  4004. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4005. ext_factor, attn_factor, beta_fast, beta_slow
  4006. );
  4007. break;
  4008. case LLM_TYPE_13B:
  4009. break;
  4010. default:
  4011. GGML_ABORT("fatal error");
  4012. }
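// Hedged note: only the 7B variant applies RoPE here; the 13B variant leaves Q/K unrotated,
// consistent with inp_pos being created only for LLM_TYPE_7B above (Baichuan-13B is generally
// described as relying on ALiBi-style position biases instead).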
  4013. cb(Qcur, "Qcur", il);
  4014. cb(Kcur, "Kcur", il);
  4015. cb(Vcur, "Vcur", il);
  4016. cur = build_attn(inp_attn, gf,
  4017. model.layers[il].wo, NULL,
  4018. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4019. }
  4020. if (il == n_layer - 1) {
  4021. // skip computing output for unused tokens
  4022. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4023. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4024. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4025. }
  4026. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4027. cb(ffn_inp, "ffn_inp", il);
  4028. // feed-forward network
  4029. {
  4030. cur = build_norm(ffn_inp,
  4031. model.layers[il].ffn_norm, NULL,
  4032. LLM_NORM_RMS, il);
  4033. cb(cur, "ffn_norm", il);
  4034. cur = build_ffn(cur,
  4035. model.layers[il].ffn_up, NULL, NULL,
  4036. model.layers[il].ffn_gate, NULL, NULL,
  4037. model.layers[il].ffn_down, NULL, NULL,
  4038. NULL,
  4039. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4040. cb(cur, "ffn_out", il);
  4041. }
  4042. cur = ggml_add(ctx0, cur, ffn_inp);
  4043. cur = build_cvec(cur, il);
  4044. cb(cur, "l_out", il);
  4045. // input for next layer
  4046. inpL = cur;
  4047. }
  4048. cur = inpL;
  4049. cur = build_norm(cur,
  4050. model.output_norm, NULL,
  4051. LLM_NORM_RMS, -1);
  4052. cb(cur, "result_norm", -1);
  4053. res->t_embd = cur;
  4054. // lm_head
  4055. cur = build_lora_mm(model.output, cur);
  4056. cb(cur, "result_output", -1);
  4057. res->t_logits = cur;
  4058. ggml_build_forward_expand(gf, cur);
  4059. }
  4060. };
  4061. struct llm_build_xverse : public llm_graph_context {
  4062. llm_build_xverse(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4063. const int64_t n_embd_head = hparams.n_embd_head_v;
  4064. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4065. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4066. ggml_tensor * cur;
  4067. ggml_tensor * inpL;
  4068. inpL = build_inp_embd(model.tok_embd);
  4069. // inp_pos - contains the positions
  4070. ggml_tensor * inp_pos = build_inp_pos();
  4071. auto * inp_attn = build_attn_inp_kv_unified();
  4072. for (int il = 0; il < n_layer; ++il) {
  4073. ggml_tensor * inpSA = inpL;
  4074. cur = build_norm(inpL,
  4075. model.layers[il].attn_norm, NULL,
  4076. LLM_NORM_RMS, il);
  4077. cb(cur, "attn_norm", il);
  4078. // self-attention
  4079. {
  4080. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4081. cb(Qcur, "Qcur", il);
  4082. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4083. cb(Kcur, "Kcur", il);
  4084. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4085. cb(Vcur, "Vcur", il);
  4086. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4087. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4088. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4089. Qcur = ggml_rope_ext(
  4090. ctx0, Qcur, inp_pos, nullptr,
  4091. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4092. ext_factor, attn_factor, beta_fast, beta_slow
  4093. );
  4094. Kcur = ggml_rope_ext(
  4095. ctx0, Kcur, inp_pos, nullptr,
  4096. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4097. ext_factor, attn_factor, beta_fast, beta_slow
  4098. );
  4099. cb(Qcur, "Qcur", il);
  4100. cb(Kcur, "Kcur", il);
  4101. cb(Vcur, "Vcur", il);
  4102. cur = build_attn(inp_attn, gf,
  4103. model.layers[il].wo, NULL,
  4104. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4105. }
  4106. if (il == n_layer - 1) {
  4107. // skip computing output for unused tokens
  4108. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4109. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4110. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4111. }
  4112. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4113. cb(ffn_inp, "ffn_inp", il);
  4114. // feed-forward network
  4115. {
  4116. cur = build_norm(ffn_inp,
  4117. model.layers[il].ffn_norm, NULL,
  4118. LLM_NORM_RMS, il);
  4119. cb(cur, "ffn_norm", il);
  4120. cur = build_ffn(cur,
  4121. model.layers[il].ffn_up, NULL, NULL,
  4122. model.layers[il].ffn_gate, NULL, NULL,
  4123. model.layers[il].ffn_down, NULL, NULL,
  4124. NULL,
  4125. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4126. cb(cur, "ffn_out", il);
  4127. }
  4128. cur = ggml_add(ctx0, cur, ffn_inp);
  4129. cur = build_cvec(cur, il);
  4130. cb(cur, "l_out", il);
  4131. // input for next layer
  4132. inpL = cur;
  4133. }
  4134. cur = inpL;
  4135. cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);
  4136. cb(cur, "result_norm", -1);
  4137. res->t_embd = cur;
  4138. // lm_head
  4139. cur = build_lora_mm(model.output, cur);
  4140. cb(cur, "result_output", -1);
  4141. res->t_logits = cur;
  4142. ggml_build_forward_expand(gf, cur);
  4143. }
  4144. };
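// graph builder for Falcon (7B/40B): fused QKV, NeoX-style RoPE, LayerNorm with bias,
// and a parallel block layout - the GELU FFN reads the attention norm output and
// both the attention and FFN results are added to the layer input;
// Falcon-40B additionally uses a second attention norm (attn_norm_2)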
  4145. struct llm_build_falcon : public llm_graph_context {
  4146. llm_build_falcon(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4147. const int64_t n_embd_head = hparams.n_embd_head_v;
  4148. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4149. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4150. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4151. ggml_tensor * cur;
  4152. ggml_tensor * inpL;
  4153. inpL = build_inp_embd(model.tok_embd);
  4154. // inp_pos - contains the positions
  4155. ggml_tensor * inp_pos = build_inp_pos();
  4156. auto * inp_attn = build_attn_inp_kv_unified();
  4157. for (int il = 0; il < n_layer; ++il) {
  4158. ggml_tensor * attn_norm;
  4159. attn_norm = build_norm(inpL,
  4160. model.layers[il].attn_norm,
  4161. model.layers[il].attn_norm_b,
  4162. LLM_NORM, il);
  4163. cb(attn_norm, "attn_norm", il);
  4164. // self-attention
  4165. {
  4166. if (model.layers[il].attn_norm_2) {
  4167. // Falcon-40B
  4168. cur = build_norm(inpL,
  4169. model.layers[il].attn_norm_2,
  4170. model.layers[il].attn_norm_2_b,
  4171. LLM_NORM, il);
  4172. cb(cur, "attn_norm_2", il);
  4173. } else {
  4174. cur = attn_norm;
  4175. }
  4176. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4177. cb(cur, "wqkv", il);
  4178. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4179. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4180. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4181. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4182. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4183. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
4184. // NeoX-style RoPE (the old ggml_rope API selected this with mode = 2)
  4185. Qcur = ggml_rope_ext(
  4186. ctx0, Qcur, inp_pos, nullptr,
  4187. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4188. ext_factor, attn_factor, beta_fast, beta_slow
  4189. );
  4190. Kcur = ggml_rope_ext(
  4191. ctx0, Kcur, inp_pos, nullptr,
  4192. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4193. ext_factor, attn_factor, beta_fast, beta_slow
  4194. );
  4195. cb(Qcur, "Qcur", il);
  4196. cb(Kcur, "Kcur", il);
  4197. cb(Vcur, "Vcur", il);
  4198. cur = build_attn(inp_attn, gf,
  4199. model.layers[il].wo, NULL,
  4200. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4201. }
  4202. if (il == n_layer - 1) {
  4203. // skip computing output for unused tokens
  4204. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4205. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4206. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  4207. attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids);
  4208. }
  4209. ggml_tensor * ffn_inp = cur;
  4210. // feed forward
  4211. {
  4212. cur = build_ffn(attn_norm, // !! use the attn norm, not the result
  4213. model.layers[il].ffn_up, NULL, NULL,
  4214. NULL, NULL, NULL,
  4215. model.layers[il].ffn_down, NULL, NULL,
  4216. NULL,
  4217. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  4218. cb(cur, "ffn_out", il);
  4219. }
  4220. cur = ggml_add(ctx0, cur, ffn_inp);
  4221. cur = ggml_add(ctx0, cur, inpL);
  4222. cur = build_cvec(cur, il);
  4223. cb(cur, "l_out", il);
  4224. // input for next layer
  4225. inpL = cur;
  4226. }
  4227. cur = inpL;
  4228. // norm
  4229. cur = build_norm(cur,
  4230. model.output_norm,
  4231. model.output_norm_b,
  4232. LLM_NORM, -1);
  4233. cb(cur, "result_norm", -1);
  4234. res->t_embd = cur;
  4235. cur = build_lora_mm(model.output, cur);
  4236. cb(cur, "result_output", -1);
  4237. res->t_logits = cur;
  4238. ggml_build_forward_expand(gf, cur);
  4239. }
  4240. };
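// graph builder for Grok-1: embeddings scaled by embedding_multiplier_scale,
// optional Q/K/V biases, unscaled KQ (scale = 1.0f), optional attn_out_norm and
// layer_out_norm around the attention and the GELU MoE FFN, and logits scaled
// by output_multiplier_scale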
  4241. struct llm_build_grok : public llm_graph_context {
  4242. llm_build_grok(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4243. const int64_t n_embd_head = hparams.n_embd_head_v;
  4244. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4245. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4246. ggml_tensor * cur;
  4247. ggml_tensor * inpL;
  4248. inpL = build_inp_embd(model.tok_embd);
  4249. // multiply by embedding_multiplier_scale of 78.38367176906169
  4250. inpL = ggml_scale(ctx0, inpL, 78.38367176906169f);
  4251. // inp_pos - contains the positions
  4252. ggml_tensor * inp_pos = build_inp_pos();
  4253. auto * inp_attn = build_attn_inp_kv_unified();
  4254. for (int il = 0; il < n_layer; ++il) {
  4255. ggml_tensor * inpSA = inpL;
  4256. // norm
  4257. cur = build_norm(inpL,
  4258. model.layers[il].attn_norm, NULL,
  4259. LLM_NORM_RMS, il);
  4260. cb(cur, "attn_norm", il);
  4261. // self-attention
  4262. {
  4263. // compute Q and K and RoPE them
  4264. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4265. cb(Qcur, "Qcur", il);
  4266. if (model.layers[il].bq) {
  4267. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  4268. cb(Qcur, "Qcur", il);
  4269. }
  4270. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4271. cb(Kcur, "Kcur", il);
  4272. if (model.layers[il].bk) {
  4273. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  4274. cb(Kcur, "Kcur", il);
  4275. }
  4276. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4277. cb(Vcur, "Vcur", il);
  4278. if (model.layers[il].bv) {
  4279. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  4280. cb(Vcur, "Vcur", il);
  4281. }
  4282. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4283. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4284. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4285. Qcur = ggml_rope_ext(
  4286. ctx0, Qcur, inp_pos, nullptr,
  4287. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4288. ext_factor, attn_factor, beta_fast, beta_slow
  4289. );
  4290. Kcur = ggml_rope_ext(
  4291. ctx0, Kcur, inp_pos, nullptr,
  4292. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4293. ext_factor, attn_factor, beta_fast, beta_slow
  4294. );
  4295. cb(Qcur, "Qcur", il);
  4296. cb(Kcur, "Kcur", il);
  4297. cb(Vcur, "Vcur", il);
  4298. cur = build_attn(inp_attn, gf,
  4299. model.layers[il].wo, model.layers[il].bo,
  4300. Qcur, Kcur, Vcur, nullptr, 1.0f, il);
  4301. }
  4302. if (il == n_layer - 1) {
  4303. // skip computing output for unused tokens
  4304. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4305. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4306. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4307. }
  4308. // Grok
  4309. // if attn_out_norm is present then apply it before adding the input
  4310. if (model.layers[il].attn_out_norm) {
  4311. cur = build_norm(cur,
  4312. model.layers[il].attn_out_norm, NULL,
  4313. LLM_NORM_RMS, il);
  4314. cb(cur, "attn_out_norm", il);
  4315. }
  4316. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4317. cb(ffn_inp, "ffn_inp", il);
  4318. // feed-forward network
  4319. // MoE branch
  4320. cur = build_norm(ffn_inp,
  4321. model.layers[il].ffn_norm, NULL,
  4322. LLM_NORM_RMS, il);
  4323. cb(cur, "ffn_norm", il);
  4324. cur = build_moe_ffn(cur,
  4325. model.layers[il].ffn_gate_inp,
  4326. model.layers[il].ffn_up_exps,
  4327. model.layers[il].ffn_gate_exps,
  4328. model.layers[il].ffn_down_exps,
  4329. nullptr,
  4330. n_expert, n_expert_used,
  4331. LLM_FFN_GELU, true,
  4332. false, 0.0,
  4333. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  4334. il);
  4335. cb(cur, "ffn_moe_out", il);
  4336. // Grok
  4337. // if layer_out_norm is present then apply it before adding the input
  4338. // Idea: maybe ffn_out_norm is a better name
  4339. if (model.layers[il].layer_out_norm) {
  4340. cur = build_norm(cur,
  4341. model.layers[il].layer_out_norm, NULL,
  4342. LLM_NORM_RMS, il);
  4343. cb(cur, "layer_out_norm", il);
  4344. }
  4345. cur = ggml_add(ctx0, cur, ffn_inp);
  4346. cb(cur, "ffn_out", il);
  4347. cur = build_cvec(cur, il);
  4348. cb(cur, "l_out", il);
  4349. // input for next layer
  4350. inpL = cur;
  4351. }
  4352. cur = inpL;
  4353. cur = build_norm(cur,
  4354. model.output_norm, NULL,
  4355. LLM_NORM_RMS, -1);
  4356. cb(cur, "result_norm", -1);
  4357. res->t_embd = cur;
  4358. // lm_head
  4359. cur = build_lora_mm(model.output, cur);
  4360. // Grok
  4361. // multiply logits by output_multiplier_scale of 0.5773502691896257
  4362. cur = ggml_scale(ctx0, cur, 0.5773502691896257f);
  4363. cb(cur, "result_output", -1);
  4364. res->t_logits = cur;
  4365. ggml_build_forward_expand(gf, cur);
  4366. }
  4367. };
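// graph builder for DBRX: fused QKV clamped to +/- f_clamp_kqv, LayerNorm without
// bias tensors, RoPE on Q and K, and a SiLU MoE FFN normalized with attn_out_norm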
  4368. struct llm_build_dbrx : public llm_graph_context {
  4369. llm_build_dbrx(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4370. const int64_t n_embd_head = hparams.n_embd_head_v;
  4371. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4372. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4373. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4374. ggml_tensor * cur;
  4375. ggml_tensor * inpL;
  4376. inpL = build_inp_embd(model.tok_embd);
  4377. // inp_pos - contains the positions
  4378. ggml_tensor * inp_pos = build_inp_pos();
  4379. auto * inp_attn = build_attn_inp_kv_unified();
  4380. for (int il = 0; il < n_layer; ++il) {
  4381. ggml_tensor * inpSA = inpL;
  4382. // norm
  4383. cur = build_norm(inpL,
  4384. model.layers[il].attn_norm, NULL,
  4385. LLM_NORM, il);
  4386. cb(cur, "attn_norm", il);
  4387. // self-attention
  4388. {
  4389. ggml_tensor * Qcur = nullptr;
  4390. ggml_tensor * Kcur = nullptr;
  4391. ggml_tensor * Vcur = nullptr;
  4392. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4393. cb(cur, "wqkv", il);
  4394. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  4395. cb(cur, "wqkv_clamped", il);
  4396. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4397. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4398. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4399. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4400. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4401. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4402. Qcur = ggml_rope_ext(
  4403. ctx0, Qcur, inp_pos, nullptr,
  4404. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4405. ext_factor, attn_factor, beta_fast, beta_slow
  4406. );
  4407. Kcur = ggml_rope_ext(
  4408. ctx0, Kcur, inp_pos, nullptr,
  4409. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4410. ext_factor, attn_factor, beta_fast, beta_slow
  4411. );
  4412. cb(Qcur, "Qcur", il);
  4413. cb(Kcur, "Kcur", il);
  4414. cb(Vcur, "Vcur", il);
  4415. cur = build_attn(inp_attn, gf,
  4416. model.layers[il].wo, NULL,
  4417. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4418. }
  4419. if (il == n_layer - 1) {
  4420. // skip computing output for unused tokens
  4421. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4422. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4423. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4424. }
  4425. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4426. cb(ffn_inp, "ffn_inp", il);
  4427. // feed-forward network
  4428. // MoE branch
  4429. cur = build_norm(ffn_inp,
  4430. model.layers[il].attn_out_norm, NULL,
  4431. LLM_NORM, il);
  4432. cb(cur, "attn_out_norm", il);
  4433. cur = build_moe_ffn(cur,
  4434. model.layers[il].ffn_gate_inp,
  4435. model.layers[il].ffn_up_exps,
  4436. model.layers[il].ffn_gate_exps,
  4437. model.layers[il].ffn_down_exps,
  4438. nullptr,
  4439. n_expert, n_expert_used,
  4440. LLM_FFN_SILU, true,
  4441. false, 0.0,
  4442. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  4443. il);
  4444. cb(cur, "ffn_moe_out", il);
  4445. cur = ggml_add(ctx0, cur, ffn_inp);
  4446. cb(cur, "ffn_out", il);
  4447. cur = build_cvec(cur, il);
  4448. cb(cur, "l_out", il);
  4449. // input for next layer
  4450. inpL = cur;
  4451. }
  4452. cur = inpL;
  4453. cur = build_norm(cur,
  4454. model.output_norm, NULL,
  4455. LLM_NORM, -1);
  4456. cb(cur, "result_norm", -1);
  4457. res->t_embd = cur;
  4458. // lm_head
  4459. cur = build_lora_mm(model.output, cur);
  4460. cb(cur, "result_output", -1);
  4461. res->t_logits = cur;
  4462. ggml_build_forward_expand(gf, cur);
  4463. }
  4464. };
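// graph builder for StarCoder: learned absolute position embeddings added to the
// token embeddings, LayerNorm with biases, fused QKV with bias, no RoPE, and a
// sequential GELU FFN with biases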
  4465. struct llm_build_starcoder : public llm_graph_context {
  4466. llm_build_starcoder(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4467. const int64_t n_embd_head = hparams.n_embd_head_v;
  4468. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4469. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4470. ggml_tensor * cur;
  4471. ggml_tensor * inpL;
  4472. inpL = build_inp_embd(model.tok_embd);
  4473. // inp_pos - contains the positions
  4474. ggml_tensor * inp_pos = build_inp_pos();
  4475. auto * inp_attn = build_attn_inp_kv_unified();
  4476. ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  4477. cb(pos, "pos_embd", -1);
  4478. inpL = ggml_add(ctx0, inpL, pos);
  4479. cb(inpL, "inpL", -1);
  4480. for (int il = 0; il < n_layer; ++il) {
  4481. cur = build_norm(inpL,
  4482. model.layers[il].attn_norm,
  4483. model.layers[il].attn_norm_b,
  4484. LLM_NORM, il);
  4485. cb(cur, "attn_norm", il);
  4486. // self-attention
  4487. {
  4488. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4489. cb(cur, "wqkv", il);
  4490. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4491. cb(cur, "bqkv", il);
  4492. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4493. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4494. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4495. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4496. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4497. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4498. cb(Qcur, "Qcur", il);
  4499. cb(Kcur, "Kcur", il);
  4500. cb(Vcur, "Vcur", il);
  4501. cur = build_attn(inp_attn, gf,
  4502. model.layers[il].wo, model.layers[il].bo,
  4503. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4504. }
  4505. if (il == n_layer - 1) {
  4506. // skip computing output for unused tokens
  4507. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4508. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4509. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  4510. }
  4511. // add the input
  4512. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4513. cb(ffn_inp, "ffn_inp", il);
  4514. // FF
  4515. {
  4516. cur = build_norm(ffn_inp,
  4517. model.layers[il].ffn_norm,
  4518. model.layers[il].ffn_norm_b,
  4519. LLM_NORM, il);
  4520. cb(cur, "ffn_norm", il);
  4521. cur = build_ffn(cur,
  4522. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  4523. NULL, NULL, NULL,
  4524. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4525. NULL,
  4526. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  4527. cb(cur, "ffn_out", il);
  4528. }
  4529. cur = ggml_add(ctx0, cur, ffn_inp);
  4530. cur = build_cvec(cur, il);
  4531. cb(cur, "l_out", il);
  4532. // input for next layer
  4533. inpL = cur;
  4534. }
  4535. cur = build_norm(inpL,
  4536. model.output_norm,
  4537. model.output_norm_b,
  4538. LLM_NORM, -1);
  4539. cb(cur, "result_norm", -1);
  4540. res->t_embd = cur;
  4541. cur = build_lora_mm(model.output, cur);
  4542. cb(cur, "result_output", -1);
  4543. res->t_logits = cur;
  4544. ggml_build_forward_expand(gf, cur);
  4545. }
  4546. };
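// graph builder for Refact: RMSNorm blocks with separate Q/K/V and a SwiGLU FFN;
// no RoPE and no inp_pos tensor - positional information is expected to come from
// an ALiBi-style bias applied inside the attention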
  4547. struct llm_build_refact : public llm_graph_context {
  4548. llm_build_refact(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4549. const int64_t n_embd_head = hparams.n_embd_head_v;
  4550. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4551. ggml_tensor * cur;
  4552. ggml_tensor * inpL;
  4553. inpL = build_inp_embd(model.tok_embd);
  4554. auto * inp_attn = build_attn_inp_kv_unified();
  4555. for (int il = 0; il < n_layer; ++il) {
  4556. ggml_tensor * inpSA = inpL;
  4557. cur = build_norm(inpL,
  4558. model.layers[il].attn_norm, NULL,
  4559. LLM_NORM_RMS, il);
  4560. cb(cur, "attn_norm", il);
  4561. // self-attention
  4562. {
  4563. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4564. cb(Qcur, "Qcur", il);
  4565. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4566. cb(Kcur, "Kcur", il);
  4567. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4568. cb(Vcur, "Vcur", il);
  4569. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4570. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4571. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4572. cb(Qcur, "Qcur", il);
  4573. cb(Kcur, "Kcur", il);
  4574. cb(Vcur, "Vcur", il);
  4575. cur = build_attn(inp_attn, gf,
  4576. model.layers[il].wo, NULL,
  4577. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4578. }
  4579. if (il == n_layer - 1) {
  4580. // skip computing output for unused tokens
  4581. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4582. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4583. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  4584. }
  4585. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4586. cb(ffn_inp, "ffn_inp", il);
  4587. // feed-forward network
  4588. {
  4589. cur = build_norm(ffn_inp,
  4590. model.layers[il].ffn_norm, NULL,
  4591. LLM_NORM_RMS, il);
  4592. cb(cur, "ffn_norm", il);
  4593. cur = build_ffn(cur,
  4594. model.layers[il].ffn_up, NULL, NULL,
  4595. model.layers[il].ffn_gate, NULL, NULL,
  4596. model.layers[il].ffn_down, NULL, NULL,
  4597. NULL,
  4598. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4599. cb(cur, "ffn_out", il);
  4600. }
  4601. cur = ggml_add(ctx0, cur, ffn_inp);
  4602. cur = build_cvec(cur, il);
  4603. cb(cur, "l_out", il);
  4604. // input for next layer
  4605. inpL = cur;
  4606. }
  4607. cur = inpL;
  4608. cur = build_norm(cur,
  4609. model.output_norm, NULL,
  4610. LLM_NORM_RMS, -1);
  4611. cb(cur, "result_norm", -1);
  4612. res->t_embd = cur;
  4613. // lm_head
  4614. cur = build_lora_mm(model.output, cur);
  4615. cb(cur, "result_output", -1);
  4616. res->t_logits = cur;
  4617. ggml_build_forward_expand(gf, cur);
  4618. }
  4619. };
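// graph builder for BERT-style encoders (BERT, JINA-BERT v2 and the fused-QKV/RoPE
// variant handled in the else branch): token + token-type (+ absolute position for
// LLM_ARCH_BERT) embeddings with an embedding LayerNorm, non-causal attention
// without a KV cache, post-attention and post-FFN LayerNorms, and no lm_head -
// the graph output is the final embedding tensor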
  4620. struct llm_build_bert : public llm_graph_context {
  4621. llm_build_bert(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4622. const int64_t n_embd_head = hparams.n_embd_head_v;
  4623. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4624. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4625. ggml_tensor * cur;
  4626. ggml_tensor * inpL;
  4627. ggml_tensor * inp_pos = nullptr;
  4628. if (model.arch != LLM_ARCH_JINA_BERT_V2) {
  4629. inp_pos = build_inp_pos();
  4630. }
  4631. // construct input embeddings (token, type, position)
  4632. inpL = build_inp_embd(model.tok_embd);
  4633. // token types are hardcoded to zero ("Sentence A")
  4634. ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
  4635. inpL = ggml_add(ctx0, inpL, type_row0);
  4636. if (model.arch == LLM_ARCH_BERT) {
  4637. inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL);
  4638. }
  4639. cb(inpL, "inp_embd", -1);
  4640. // embed layer norm
  4641. inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
  4642. cb(inpL, "inp_norm", -1);
  4643. auto * inp_attn = build_attn_inp_no_cache();
  4644. // iterate layers
  4645. for (int il = 0; il < n_layer; ++il) {
  4646. ggml_tensor * cur = inpL;
  4647. ggml_tensor * Qcur;
  4648. ggml_tensor * Kcur;
  4649. ggml_tensor * Vcur;
  4650. // self-attention
  4651. if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) {
  4652. Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq);
  4653. if (model.layers[il].attn_q_norm) {
  4654. Qcur = build_norm(Qcur,
  4655. model.layers[il].attn_q_norm,
  4656. model.layers[il].attn_q_norm_b,
  4657. LLM_NORM, il);
  4658. }
  4659. Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk);
  4660. if (model.layers[il].attn_k_norm) {
  4661. Kcur = build_norm(Kcur,
  4662. model.layers[il].attn_k_norm,
  4663. model.layers[il].attn_k_norm_b,
  4664. LLM_NORM, il);
  4665. }
  4666. Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv);
  4667. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4668. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4669. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4670. } else {
  4671. // compute Q and K and RoPE them
  4672. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4673. cb(cur, "wqkv", il);
  4674. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4675. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4676. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4677. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4678. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4679. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4680. Qcur = ggml_rope_ext(
  4681. ctx0, Qcur, inp_pos, nullptr,
  4682. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4683. ext_factor, attn_factor, beta_fast, beta_slow
  4684. );
  4685. Kcur = ggml_rope_ext(
  4686. ctx0, Kcur, inp_pos, nullptr,
  4687. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  4688. ext_factor, attn_factor, beta_fast, beta_slow
  4689. );
  4690. }
  4691. cb(Qcur, "Qcur", il);
  4692. cb(Kcur, "Kcur", il);
  4693. cb(Vcur, "Vcur", il);
  4694. cur = build_attn(inp_attn, gf,
  4695. model.layers[il].wo, model.layers[il].bo,
  4696. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4697. cb(cur, "kqv_out", il);
  4698. if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
  4699. // skip computing output for unused tokens
  4700. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4701. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4702. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  4703. }
  4704. // re-add the layer input
  4705. cur = ggml_add(ctx0, cur, inpL);
  4706. // attention layer norm
  4707. cur = build_norm(cur, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, il);
  4708. if (model.layers[il].attn_norm_2 != nullptr) {
  4709. cur = ggml_add(ctx0, cur, inpL); // re-add the layer input
  4710. cur = build_norm(cur, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, il);
  4711. }
  4712. ggml_tensor * ffn_inp = cur;
  4713. cb(ffn_inp, "ffn_inp", il);
  4714. // feed-forward network
  4715. if (model.arch == LLM_ARCH_BERT) {
  4716. cur = build_ffn(cur,
  4717. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  4718. NULL, NULL, NULL,
  4719. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4720. NULL,
  4721. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  4722. } else if (model.arch == LLM_ARCH_JINA_BERT_V2) {
  4723. cur = build_ffn(cur,
  4724. model.layers[il].ffn_up, NULL, NULL,
  4725. model.layers[il].ffn_gate, NULL, NULL,
  4726. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4727. NULL,
  4728. LLM_FFN_GELU, LLM_FFN_PAR, il);
  4729. } else {
  4730. cur = build_ffn(cur,
  4731. model.layers[il].ffn_up, NULL, NULL,
  4732. model.layers[il].ffn_gate, NULL, NULL,
  4733. model.layers[il].ffn_down, NULL, NULL,
  4734. NULL,
  4735. LLM_FFN_SILU, LLM_FFN_PAR, il);
  4736. }
  4737. cb(cur, "ffn_out", il);
4738. // residual: the attention output (ffn_inp) bypasses the intermediate (FFN) layer
  4739. cur = ggml_add(ctx0, cur, ffn_inp);
  4740. // output layer norm
  4741. cur = build_norm(cur, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, il);
  4742. // input for next layer
  4743. inpL = cur;
  4744. }
  4745. cur = inpL;
  4746. cb(cur, "result_embd", -1);
  4747. res->t_embd = cur;
  4748. ggml_build_forward_expand(gf, cur);
  4749. }
  4750. };
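// graph builder for BLOOM: extra LayerNorm on the token embeddings (tok_norm),
// fused QKV with bias, no RoPE (positions are expected to be handled by an
// ALiBi-style bias inside the attention), and a sequential GELU FFN with biases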
  4751. struct llm_build_bloom : public llm_graph_context {
  4752. llm_build_bloom(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4753. const int64_t n_embd_head = hparams.n_embd_head_v;
  4754. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4755. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4756. ggml_tensor * cur;
  4757. ggml_tensor * inpL;
  4758. inpL = build_inp_embd(model.tok_embd);
  4759. auto * inp_attn = build_attn_inp_kv_unified();
  4760. inpL = build_norm(inpL,
  4761. model.tok_norm,
  4762. model.tok_norm_b,
  4763. LLM_NORM, -1);
  4764. cb(inpL, "inp_norm", -1);
  4765. for (int il = 0; il < n_layer; ++il) {
  4766. cur = build_norm(inpL,
  4767. model.layers[il].attn_norm,
  4768. model.layers[il].attn_norm_b,
  4769. LLM_NORM, il);
  4770. cb(cur, "attn_norm", il);
  4771. // self-attention
  4772. {
  4773. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4774. cb(cur, "wqkv", il);
  4775. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4776. cb(cur, "bqkv", il);
  4777. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4778. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4779. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4780. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4781. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4782. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4783. cb(Qcur, "Qcur", il);
  4784. cb(Kcur, "Kcur", il);
  4785. cb(Vcur, "Vcur", il);
  4786. cur = build_attn(inp_attn, gf,
  4787. model.layers[il].wo, model.layers[il].bo,
  4788. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4789. }
  4790. if (il == n_layer - 1) {
  4791. // skip computing output for unused tokens
  4792. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4793. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4794. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  4795. }
  4796. // Add the input
  4797. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4798. cb(ffn_inp, "ffn_inp", il);
  4799. // FF
  4800. {
  4801. cur = build_norm(ffn_inp,
  4802. model.layers[il].ffn_norm,
  4803. model.layers[il].ffn_norm_b,
  4804. LLM_NORM, il);
  4805. cb(cur, "ffn_norm", il);
  4806. cur = build_ffn(cur,
  4807. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  4808. NULL, NULL, NULL,
  4809. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4810. NULL,
  4811. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  4812. cb(cur, "ffn_out", il);
  4813. }
  4814. cur = ggml_add(ctx0, cur, ffn_inp);
  4815. cur = build_cvec(cur, il);
  4816. cb(cur, "l_out", il);
  4817. // input for next layer
  4818. inpL = cur;
  4819. }
  4820. cur = build_norm(inpL,
  4821. model.output_norm,
  4822. model.output_norm_b,
  4823. LLM_NORM, -1);
  4824. cb(cur, "result_norm", -1);
  4825. res->t_embd = cur;
  4826. cur = build_lora_mm(model.output, cur);
  4827. cb(cur, "result_output", -1);
  4828. res->t_logits = cur;
  4829. ggml_build_forward_expand(gf, cur);
  4830. }
  4831. };
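// graph builder for MPT: optional learned position embeddings, fused QKV with
// optional bias and optional clamping (f_clamp_kqv), optional Q/K LayerNorm, and a
// sequential GELU FFN; without position embeddings the model is expected to rely
// on an ALiBi-style bias inside the attention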
  4832. struct llm_build_mpt : public llm_graph_context {
  4833. llm_build_mpt(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4834. const int64_t n_embd_head = hparams.n_embd_head_v;
  4835. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4836. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4837. ggml_tensor * cur;
  4838. ggml_tensor * pos;
  4839. ggml_tensor * inpL;
  4840. inpL = build_inp_embd(model.tok_embd);
  4841. auto * inp_attn = build_attn_inp_kv_unified();
  4842. if (model.pos_embd) {
  4843. // inp_pos - contains the positions
  4844. ggml_tensor * inp_pos = build_inp_pos();
  4845. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  4846. cb(pos, "pos_embd", -1);
  4847. inpL = ggml_add(ctx0, inpL, pos);
  4848. cb(inpL, "inpL", -1);
  4849. }
  4850. for (int il = 0; il < n_layer; ++il) {
  4851. ggml_tensor * attn_norm;
  4852. attn_norm = build_norm(inpL,
  4853. model.layers[il].attn_norm,
  4854. model.layers[il].attn_norm_b,
  4855. LLM_NORM, il);
  4856. cb(attn_norm, "attn_norm", il);
  4857. // self-attention
  4858. {
  4859. cur = attn_norm;
  4860. cur = build_lora_mm(model.layers[il].wqkv, cur);
  4861. cb(cur, "wqkv", il);
4862. if (model.layers[il].bqkv) {
  4863. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4864. cb(cur, "bqkv", il);
  4865. }
  4866. if (hparams.f_clamp_kqv > 0.0f) {
  4867. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  4868. cb(cur, "wqkv_clamped", il);
  4869. }
  4870. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4871. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4872. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4873. cb(Qcur, "Qcur", il);
  4874. cb(Kcur, "Kcur", il);
  4875. cb(Vcur, "Vcur", il);
  4876. // Q/K Layernorm
  4877. if (model.layers[il].attn_q_norm) {
  4878. Qcur = build_norm(Qcur,
  4879. model.layers[il].attn_q_norm,
  4880. model.layers[il].attn_q_norm_b,
  4881. LLM_NORM, il);
  4882. cb(Qcur, "Qcur", il);
  4883. Kcur = build_norm(Kcur,
  4884. model.layers[il].attn_k_norm,
  4885. model.layers[il].attn_k_norm_b,
  4886. LLM_NORM, il);
  4887. cb(Kcur, "Kcur", il);
  4888. }
  4889. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4890. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4891. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4892. cb(Qcur, "Qcur", il);
  4893. cb(Kcur, "Kcur", il);
  4894. cb(Vcur, "Vcur", il);
  4895. cur = build_attn(inp_attn, gf,
  4896. model.layers[il].wo, model.layers[il].bo,
  4897. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  4898. }
  4899. if (il == n_layer - 1) {
  4900. // skip computing output for unused tokens
  4901. ggml_tensor * inp_out_ids = build_inp_out_ids();
  4902. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  4903. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  4904. }
  4905. // Add the input
  4906. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4907. cb(ffn_inp, "ffn_inp", il);
  4908. // feed forward
  4909. {
  4910. cur = build_norm(ffn_inp,
  4911. model.layers[il].ffn_norm,
  4912. model.layers[il].ffn_norm_b,
  4913. LLM_NORM, il);
  4914. cb(cur, "ffn_norm", il);
  4915. cur = build_ffn(cur,
  4916. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  4917. NULL, NULL, NULL,
  4918. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  4919. model.layers[il].ffn_act,
  4920. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  4921. cb(cur, "ffn_out", il);
  4922. }
  4923. cur = ggml_add(ctx0, cur, ffn_inp);
  4924. cur = build_cvec(cur, il);
  4925. cb(cur, "l_out", il);
  4926. // input for next layer
  4927. inpL = cur;
  4928. }
  4929. cur = inpL;
  4930. cur = build_norm(cur,
  4931. model.output_norm,
  4932. model.output_norm_b,
  4933. LLM_NORM, -1);
  4934. cb(cur, "result_norm", -1);
  4935. res->t_embd = cur;
  4936. cur = build_lora_mm(model.output, cur);
  4937. cb(cur, "result_output", -1);
  4938. res->t_logits = cur;
  4939. ggml_build_forward_expand(gf, cur);
  4940. }
  4941. };
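// graph builder for StableLM: LayerNorm with biases, optional per-layer Q/K norms,
// RoPE on Q and K, SwiGLU FFN; layers without ffn_norm run the FFN on the
// attention-norm output instead (parallel residual)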
  4942. struct llm_build_stablelm : public llm_graph_context {
  4943. llm_build_stablelm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  4944. const int64_t n_embd_head = hparams.n_embd_head_v;
  4945. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4946. ggml_tensor * cur;
  4947. ggml_tensor * inpL;
  4948. inpL = build_inp_embd(model.tok_embd);
  4949. // inp_pos - contains the positions
  4950. ggml_tensor * inp_pos = build_inp_pos();
  4951. auto * inp_attn = build_attn_inp_kv_unified();
  4952. for (int il = 0; il < n_layer; ++il) {
  4953. // norm
  4954. cur = build_norm(inpL,
  4955. model.layers[il].attn_norm,
  4956. model.layers[il].attn_norm_b,
  4957. LLM_NORM, il);
  4958. cb(cur, "attn_norm", il);
  4959. ggml_tensor * inpSA = cur;
  4960. // self-attention
  4961. {
  4962. // compute Q and K and RoPE them
  4963. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  4964. cb(Qcur, "Qcur", il);
  4965. if (model.layers[il].bq) {
  4966. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  4967. cb(Qcur, "Qcur", il);
  4968. }
  4969. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  4970. cb(Kcur, "Kcur", il);
  4971. if (model.layers[il].bk) {
  4972. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  4973. cb(Kcur, "Kcur", il);
  4974. }
  4975. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  4976. cb(Vcur, "Vcur", il);
  4977. if (model.layers[il].bv) {
  4978. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  4979. cb(Vcur, "Vcur", il);
  4980. }
  4981. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4982. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4983. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  4984. if (model.layers[il].attn_q_norm) {
  4985. Qcur = build_norm(Qcur,
  4986. model.layers[il].attn_q_norm,
  4987. NULL,
  4988. LLM_NORM, il);
  4989. cb(Qcur, "Qcur", il);
  4990. }
  4991. if (model.layers[il].attn_k_norm) {
  4992. Kcur = build_norm(Kcur,
  4993. model.layers[il].attn_k_norm,
  4994. NULL,
  4995. LLM_NORM, il);
  4996. cb(Kcur, "Kcur", il);
  4997. }
  4998. Qcur = ggml_rope_ext(
  4999. ctx0, Qcur, inp_pos, nullptr,
  5000. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5001. ext_factor, attn_factor, beta_fast, beta_slow
  5002. );
  5003. Kcur = ggml_rope_ext(
  5004. ctx0, Kcur, inp_pos, nullptr,
  5005. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5006. ext_factor, attn_factor, beta_fast, beta_slow
  5007. );
  5008. cb(Qcur, "Qcur", il);
  5009. cb(Kcur, "Kcur", il);
  5010. cb(Vcur, "Vcur", il);
  5011. cur = build_attn(inp_attn, gf,
  5012. model.layers[il].wo, NULL,
  5013. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5014. }
  5015. if (il == n_layer - 1) {
  5016. // skip computing output for unused tokens
  5017. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5018. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5019. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  5020. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5021. }
  5022. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5023. cb(ffn_inp, "ffn_inp", il);
  5024. // feed-forward network
  5025. {
  5026. if (model.layers[il].ffn_norm) {
  5027. cur = build_norm(ffn_inp,
  5028. model.layers[il].ffn_norm,
  5029. model.layers[il].ffn_norm_b,
  5030. LLM_NORM, il);
  5031. cb(cur, "ffn_norm", il);
  5032. } else {
  5033. // parallel residual
  5034. cur = inpSA;
  5035. }
  5036. cur = build_ffn(cur,
  5037. model.layers[il].ffn_up, NULL, NULL,
  5038. model.layers[il].ffn_gate, NULL, NULL,
  5039. model.layers[il].ffn_down, NULL, NULL,
  5040. NULL,
  5041. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5042. cb(cur, "ffn_out", il);
  5043. }
  5044. cur = ggml_add(ctx0, cur, ffn_inp);
  5045. cur = build_cvec(cur, il);
  5046. cb(cur, "l_out", il);
  5047. // input for next layer
  5048. inpL = cur;
  5049. }
  5050. cur = inpL;
  5051. cur = build_norm(cur,
  5052. model.output_norm,
  5053. model.output_norm_b,
  5054. LLM_NORM, -1);
  5055. cb(cur, "result_norm", -1);
  5056. res->t_embd = cur;
  5057. // lm_head
  5058. cur = build_lora_mm(model.output, cur);
  5059. cb(cur, "result_output", -1);
  5060. res->t_logits = cur;
  5061. ggml_build_forward_expand(gf, cur);
  5062. }
  5063. };
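// graph builder for Qwen (v1): fused QKV with bias split into three n_embd-wide
// views (Q, K and V all use the full embedding width), NeoX-style RoPE, RMSNorm,
// SwiGLU FFN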
  5064. struct llm_build_qwen : public llm_graph_context {
  5065. llm_build_qwen(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5066. const int64_t n_embd_head = hparams.n_embd_head_v;
  5067. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5068. ggml_tensor * cur;
  5069. ggml_tensor * inpL;
  5070. inpL = build_inp_embd(model.tok_embd);
  5071. // inp_pos - contains the positions
  5072. ggml_tensor * inp_pos = build_inp_pos();
  5073. auto * inp_attn = build_attn_inp_kv_unified();
  5074. for (int il = 0; il < n_layer; ++il) {
  5075. ggml_tensor * inpSA = inpL;
  5076. cur = build_norm(inpL,
  5077. model.layers[il].attn_norm, NULL,
  5078. LLM_NORM_RMS, il);
  5079. cb(cur, "attn_norm", il);
  5080. // self-attention
  5081. {
  5082. cur = build_lora_mm(model.layers[il].wqkv, cur);
  5083. cb(cur, "wqkv", il);
  5084. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5085. cb(cur, "bqkv", il);
  5086. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5087. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5088. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
  5089. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5090. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5091. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
5092. // NeoX-style RoPE (the old ggml_rope API selected this with mode = 2)
  5093. Qcur = ggml_rope_ext(
  5094. ctx0, Qcur, inp_pos, nullptr,
  5095. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5096. ext_factor, attn_factor, beta_fast, beta_slow
  5097. );
  5098. Kcur = ggml_rope_ext(
  5099. ctx0, Kcur, inp_pos, nullptr,
  5100. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5101. ext_factor, attn_factor, beta_fast, beta_slow
  5102. );
  5103. cb(Qcur, "Qcur", il);
  5104. cb(Kcur, "Kcur", il);
  5105. cb(Vcur, "Vcur", il);
  5106. cur = build_attn(inp_attn, gf,
  5107. model.layers[il].wo, NULL,
  5108. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5109. }
  5110. if (il == n_layer - 1) {
  5111. // skip computing output for unused tokens
  5112. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5113. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5114. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5115. }
  5116. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5117. cb(ffn_inp, "ffn_inp", il);
5118. // feed-forward network
  5119. {
  5120. cur = build_norm(ffn_inp,
  5121. model.layers[il].ffn_norm, NULL,
  5122. LLM_NORM_RMS, il);
  5123. cb(cur, "ffn_norm", il);
  5124. cur = build_ffn(cur,
  5125. model.layers[il].ffn_up, NULL, NULL,
  5126. model.layers[il].ffn_gate, NULL, NULL,
  5127. model.layers[il].ffn_down, NULL, NULL,
  5128. NULL,
  5129. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5130. cb(cur, "ffn_out", il);
  5131. }
  5132. cur = ggml_add(ctx0, cur, ffn_inp);
  5133. cur = build_cvec(cur, il);
  5134. cb(cur, "l_out", il);
  5135. // input for next layer
  5136. inpL = cur;
  5137. }
  5138. cur = inpL;
  5139. cur = build_norm(cur,
  5140. model.output_norm, NULL,
  5141. LLM_NORM_RMS, -1);
  5142. cb(cur, "result_norm", -1);
  5143. res->t_embd = cur;
  5144. // lm_head
  5145. cur = build_lora_mm(model.output, cur);
  5146. cb(cur, "result_output", -1);
  5147. res->t_logits = cur;
  5148. ggml_build_forward_expand(gf, cur);
  5149. }
  5150. };
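// graph builder for Qwen2: separate Q/K/V projections each with a bias, RoPE on
// Q and K, RMSNorm, SwiGLU FFN, attention output bias (bo) passed to build_attn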
  5151. struct llm_build_qwen2 : public llm_graph_context {
  5152. llm_build_qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5153. const int64_t n_embd_head = hparams.n_embd_head_v;
  5154. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5155. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5156. ggml_tensor * cur;
  5157. ggml_tensor * inpL;
  5158. inpL = build_inp_embd(model.tok_embd);
  5159. // inp_pos - contains the positions
  5160. ggml_tensor * inp_pos = build_inp_pos();
  5161. auto * inp_attn = build_attn_inp_kv_unified();
  5162. for (int il = 0; il < n_layer; ++il) {
  5163. ggml_tensor * inpSA = inpL;
  5164. // norm
  5165. cur = build_norm(inpL,
  5166. model.layers[il].attn_norm, NULL,
  5167. LLM_NORM_RMS, il);
  5168. cb(cur, "attn_norm", il);
  5169. // self-attention
  5170. {
  5171. // compute Q and K and RoPE them
  5172. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5173. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5174. cb(Qcur, "Qcur", il);
  5175. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5176. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5177. cb(Kcur, "Kcur", il);
  5178. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5179. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5180. cb(Vcur, "Vcur", il);
  5181. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5182. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5183. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5184. Qcur = ggml_rope_ext(
  5185. ctx0, Qcur, inp_pos, nullptr,
  5186. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5187. ext_factor, attn_factor, beta_fast, beta_slow
  5188. );
  5189. Kcur = ggml_rope_ext(
  5190. ctx0, Kcur, inp_pos, nullptr,
  5191. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5192. ext_factor, attn_factor, beta_fast, beta_slow
  5193. );
  5194. cb(Qcur, "Qcur", il);
  5195. cb(Kcur, "Kcur", il);
  5196. cb(Vcur, "Vcur", il);
  5197. cur = build_attn(inp_attn, gf,
  5198. model.layers[il].wo, model.layers[il].bo,
  5199. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5200. }
  5201. if (il == n_layer - 1) {
  5202. // skip computing output for unused tokens
  5203. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5204. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5205. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5206. }
  5207. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5208. cb(ffn_inp, "ffn_inp", il);
  5209. // feed-forward network
  5210. cur = build_norm(ffn_inp,
  5211. model.layers[il].ffn_norm, NULL,
  5212. LLM_NORM_RMS, il);
  5213. cb(cur, "ffn_norm", il);
  5214. cur = build_ffn(cur,
  5215. model.layers[il].ffn_up, NULL, NULL,
  5216. model.layers[il].ffn_gate, NULL, NULL,
  5217. model.layers[il].ffn_down, NULL, NULL,
  5218. NULL,
  5219. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5220. cb(cur, "ffn_out", il);
  5221. cur = ggml_add(ctx0, cur, ffn_inp);
  5222. cur = build_cvec(cur, il);
  5223. cb(cur, "l_out", il);
  5224. // input for next layer
  5225. inpL = cur;
  5226. }
  5227. cur = inpL;
  5228. cur = build_norm(cur,
  5229. model.output_norm, NULL,
  5230. LLM_NORM_RMS, -1);
  5231. cb(cur, "result_norm", -1);
  5232. res->t_embd = cur;
  5233. // lm_head
  5234. cur = build_lora_mm(model.output, cur);
  5235. cb(cur, "result_output", -1);
  5236. res->t_logits = cur;
  5237. ggml_build_forward_expand(gf, cur);
  5238. }
  5239. };
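// graph builder for Qwen2-VL: same structure as Qwen2, except Q and K use
// multi-section rotary embeddings (ggml_rope_multi with hparams.rope_sections)
// so the position components used for multimodal inputs can be encoded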
  5240. struct llm_build_qwen2vl : public llm_graph_context {
  5241. llm_build_qwen2vl(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5242. const int64_t n_embd_head = hparams.n_embd_head_v;
  5243. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5244. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5245. ggml_tensor * cur;
  5246. ggml_tensor * inpL;
  5247. inpL = build_inp_embd(model.tok_embd);
  5248. // inp_pos - contains the positions
  5249. ggml_tensor * inp_pos = build_inp_pos();
  5250. auto * inp_attn = build_attn_inp_kv_unified();
  5251. int sections[4];
  5252. std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections);
  5253. for (int il = 0; il < n_layer; ++il) {
  5254. ggml_tensor * inpSA = inpL;
  5255. // norm
  5256. cur = build_norm(inpL,
  5257. model.layers[il].attn_norm, NULL,
  5258. LLM_NORM_RMS, il);
  5259. cb(cur, "attn_norm", il);
  5260. // self-attention
  5261. {
  5262. // compute Q and K and RoPE them
  5263. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5264. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5265. cb(Qcur, "Qcur", il);
  5266. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5267. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5268. cb(Kcur, "Kcur", il);
  5269. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5270. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5271. cb(Vcur, "Vcur", il);
  5272. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5273. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5274. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5275. Qcur = ggml_rope_multi(
  5276. ctx0, Qcur, inp_pos, nullptr,
  5277. n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
  5278. ext_factor, attn_factor, beta_fast, beta_slow
  5279. );
  5280. Kcur = ggml_rope_multi(
  5281. ctx0, Kcur, inp_pos, nullptr,
  5282. n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
  5283. ext_factor, attn_factor, beta_fast, beta_slow
  5284. );
  5285. cb(Qcur, "Qcur", il);
  5286. cb(Kcur, "Kcur", il);
  5287. cb(Vcur, "Vcur", il);
  5288. cur = build_attn(inp_attn, gf,
  5289. model.layers[il].wo, model.layers[il].bo,
  5290. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5291. }
  5292. if (il == n_layer - 1) {
  5293. // skip computing output for unused tokens
  5294. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5295. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5296. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5297. }
  5298. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5299. cb(ffn_inp, "ffn_inp", il);
  5300. // feed-forward network
  5301. cur = build_norm(ffn_inp,
  5302. model.layers[il].ffn_norm, NULL,
  5303. LLM_NORM_RMS, il);
  5304. cb(cur, "ffn_norm", il);
  5305. cur = build_ffn(cur,
  5306. model.layers[il].ffn_up, NULL, NULL,
  5307. model.layers[il].ffn_gate, NULL, NULL,
  5308. model.layers[il].ffn_down, NULL, NULL,
  5309. NULL,
  5310. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5311. cb(cur, "ffn_out", il);
  5312. cur = ggml_add(ctx0, cur, ffn_inp);
  5313. cur = build_cvec(cur, il);
  5314. cb(cur, "l_out", il);
  5315. // input for next layer
  5316. inpL = cur;
  5317. }
  5318. cur = inpL;
  5319. cur = build_norm(cur,
  5320. model.output_norm, NULL,
  5321. LLM_NORM_RMS, -1);
  5322. cb(cur, "result_norm", -1);
  5323. res->t_embd = cur;
  5324. // lm_head
  5325. cur = build_lora_mm(model.output, cur);
  5326. cb(cur, "result_output", -1);
  5327. res->t_logits = cur;
  5328. ggml_build_forward_expand(gf, cur);
  5329. }
  5330. };
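// graph builder for Qwen2-MoE: Qwen2-style attention (optional Q/K/V biases)
// followed by a SiLU MoE FFN plus a shared expert whose output is gated by
// sigmoid(ffn_gate_inp_shexp), computed below as silu(x)/x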
  5331. struct llm_build_qwen2moe : public llm_graph_context {
  5332. llm_build_qwen2moe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5333. const int64_t n_embd_head = hparams.n_embd_head_v;
  5334. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5335. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5336. ggml_tensor * cur;
  5337. ggml_tensor * inpL;
  5338. inpL = build_inp_embd(model.tok_embd);
  5339. // inp_pos - contains the positions
  5340. ggml_tensor * inp_pos = build_inp_pos();
  5341. auto * inp_attn = build_attn_inp_kv_unified();
  5342. for (int il = 0; il < n_layer; ++il) {
  5343. ggml_tensor * inpSA = inpL;
  5344. // norm
  5345. cur = build_norm(inpL,
  5346. model.layers[il].attn_norm, NULL,
  5347. LLM_NORM_RMS, il);
  5348. cb(cur, "attn_norm", il);
5349. // self-attention
  5350. {
  5351. // compute Q and K and RoPE them
  5352. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5353. cb(Qcur, "Qcur", il);
  5354. if (model.layers[il].bq) {
  5355. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5356. cb(Qcur, "Qcur", il);
  5357. }
  5358. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5359. cb(Kcur, "Kcur", il);
  5360. if (model.layers[il].bk) {
  5361. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5362. cb(Kcur, "Kcur", il);
  5363. }
  5364. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5365. cb(Vcur, "Vcur", il);
  5366. if (model.layers[il].bv) {
  5367. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5368. cb(Vcur, "Vcur", il);
  5369. }
  5370. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5371. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5372. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5373. Qcur = ggml_rope_ext(
  5374. ctx0, Qcur, inp_pos, nullptr,
  5375. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5376. ext_factor, attn_factor, beta_fast, beta_slow
  5377. );
  5378. Kcur = ggml_rope_ext(
  5379. ctx0, Kcur, inp_pos, nullptr,
  5380. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5381. ext_factor, attn_factor, beta_fast, beta_slow
  5382. );
  5383. cb(Qcur, "Qcur", il);
  5384. cb(Kcur, "Kcur", il);
  5385. cb(Vcur, "Vcur", il);
  5386. cur = build_attn(inp_attn, gf,
  5387. model.layers[il].wo, model.layers[il].bo,
  5388. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5389. }
  5390. if (il == n_layer - 1) {
  5391. // skip computing output for unused tokens
  5392. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5393. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5394. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5395. }
  5396. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5397. cb(ffn_inp, "ffn_inp", il);
  5398. // MoE branch
  5399. cur = build_norm(ffn_inp,
  5400. model.layers[il].ffn_norm, NULL,
  5401. LLM_NORM_RMS, il);
  5402. cb(cur, "ffn_norm", il);
  5403. ggml_tensor * moe_out =
  5404. build_moe_ffn(cur,
  5405. model.layers[il].ffn_gate_inp,
  5406. model.layers[il].ffn_up_exps,
  5407. model.layers[il].ffn_gate_exps,
  5408. model.layers[il].ffn_down_exps,
  5409. nullptr,
  5410. n_expert, n_expert_used,
  5411. LLM_FFN_SILU, false,
  5412. false, 0.0,
  5413. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  5414. il);
  5415. cb(moe_out, "ffn_moe_out", il);
  5416. // FFN shared expert
  5417. {
  5418. ggml_tensor * cur_gate_inp = build_lora_mm(model.layers[il].ffn_gate_inp_shexp, cur);
  5419. cb(cur_gate_inp, "ffn_shexp_gate_inp", il);
5420. // sigmoid gate: silu(x)/x == sigmoid(x)
  5421. ggml_tensor * cur_gate = ggml_div(ctx0, ggml_silu(ctx0, cur_gate_inp), cur_gate_inp);
  5422. cb(cur_gate, "ffn_shexp_gate", il);
  5423. ggml_tensor * cur_ffn = build_ffn(cur,
  5424. model.layers[il].ffn_up_shexp, NULL, NULL,
  5425. model.layers[il].ffn_gate_shexp, NULL, NULL,
  5426. model.layers[il].ffn_down_shexp, NULL, NULL,
  5427. NULL,
  5428. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5429. cb(cur_ffn, "ffn_shexp", il);
  5430. ggml_tensor * ffn_shexp_out = ggml_mul(ctx0, cur_ffn, cur_gate);
  5431. cb(ffn_shexp_out, "ffn_shexp_out", il);
  5432. moe_out = ggml_add(ctx0, moe_out, ffn_shexp_out);
  5433. cb(moe_out, "ffn_out", il);
  5434. cur = moe_out;
  5435. }
  5436. cur = ggml_add(ctx0, cur, ffn_inp);
  5437. cur = build_cvec(cur, il);
  5438. cb(cur, "l_out", il);
  5439. // input for next layer
  5440. inpL = cur;
  5441. }
  5442. cur = inpL;
  5443. cur = build_norm(cur,
  5444. model.output_norm, NULL,
  5445. LLM_NORM_RMS, -1);
  5446. cb(cur, "result_norm", -1);
  5447. res->t_embd = cur;
  5448. // lm_head
  5449. cur = build_lora_mm(model.output, cur);
  5450. cb(cur, "result_output", -1);
  5451. res->t_logits = cur;
  5452. ggml_build_forward_expand(gf, cur);
  5453. }
  5454. };
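// graph builder for Qwen3: like Qwen2 but without Q/K/V biases; Q and K are
// RMS-normalized (attn_q_norm / attn_k_norm) before RoPE is applied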
  5455. struct llm_build_qwen3 : public llm_graph_context {
  5456. llm_build_qwen3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5457. const int64_t n_embd_head = hparams.n_embd_head_v;
  5458. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5459. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5460. ggml_tensor * cur;
  5461. ggml_tensor * inpL;
  5462. inpL = build_inp_embd(model.tok_embd);
  5463. // inp_pos - contains the positions
  5464. ggml_tensor * inp_pos = build_inp_pos();
  5465. auto * inp_attn = build_attn_inp_kv_unified();
  5466. for (int il = 0; il < n_layer; ++il) {
  5467. ggml_tensor * inpSA = inpL;
  5468. // norm
  5469. cur = build_norm(inpL,
  5470. model.layers[il].attn_norm, NULL,
  5471. LLM_NORM_RMS, il);
  5472. cb(cur, "attn_norm", il);
  5473. // self-attention
  5474. {
  5475. // compute Q and K and RoPE them
  5476. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5477. cb(Qcur, "Qcur", il);
  5478. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5479. cb(Kcur, "Kcur", il);
  5480. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5481. cb(Vcur, "Vcur", il);
  5482. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5483. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5484. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5485. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
  5486. cb(Qcur, "Qcur_normed", il);
  5487. Qcur = ggml_rope_ext(
  5488. ctx0, Qcur, inp_pos, nullptr,
  5489. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5490. ext_factor, attn_factor, beta_fast, beta_slow
  5491. );
  5492. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
  5493. cb(Kcur, "Kcur_normed", il);
  5494. Kcur = ggml_rope_ext(
  5495. ctx0, Kcur, inp_pos, nullptr,
  5496. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5497. ext_factor, attn_factor, beta_fast, beta_slow
  5498. );
  5499. cb(Qcur, "Qcur", il);
  5500. cb(Kcur, "Kcur", il);
  5501. cb(Vcur, "Vcur", il);
  5502. cur = build_attn(inp_attn, gf,
  5503. model.layers[il].wo, model.layers[il].bo,
  5504. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5505. }
  5506. if (il == n_layer - 1) {
  5507. // skip computing output for unused tokens
  5508. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5509. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5510. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5511. }
  5512. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5513. cb(ffn_inp, "ffn_inp", il);
  5514. // feed-forward network
  5515. cur = build_norm(ffn_inp,
  5516. model.layers[il].ffn_norm, NULL,
  5517. LLM_NORM_RMS, il);
  5518. cb(cur, "ffn_norm", il);
  5519. cur = build_ffn(cur,
  5520. model.layers[il].ffn_up, NULL, NULL,
  5521. model.layers[il].ffn_gate, NULL, NULL,
  5522. model.layers[il].ffn_down, NULL, NULL,
  5523. NULL,
  5524. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5525. cb(cur, "ffn_out", il);
  5526. cur = ggml_add(ctx0, cur, ffn_inp);
  5527. cur = build_cvec(cur, il);
  5528. cb(cur, "l_out", il);
  5529. // input for next layer
  5530. inpL = cur;
  5531. }
  5532. cur = inpL;
  5533. cur = build_norm(cur,
  5534. model.output_norm, NULL,
  5535. LLM_NORM_RMS, -1);
  5536. cb(cur, "result_norm", -1);
  5537. res->t_embd = cur;
  5538. // lm_head
  5539. cur = build_lora_mm(model.output, cur);
  5540. cb(cur, "result_output", -1);
  5541. res->t_logits = cur;
  5542. ggml_build_forward_expand(gf, cur);
  5543. }
  5544. };
  5545. struct llm_build_qwen3moe : public llm_graph_context {
  5546. llm_build_qwen3moe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5547. const int64_t n_embd_head = hparams.n_embd_head_v;
  5548. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5549. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5550. ggml_tensor * cur;
  5551. ggml_tensor * inpL;
  5552. inpL = build_inp_embd(model.tok_embd);
  5553. // inp_pos - contains the positions
  5554. ggml_tensor * inp_pos = build_inp_pos();
  5555. auto * inp_attn = build_attn_inp_kv_unified();
  5556. for (int il = 0; il < n_layer; ++il) {
  5557. ggml_tensor * inpSA = inpL;
  5558. // norm
  5559. cur = build_norm(inpL,
  5560. model.layers[il].attn_norm, NULL,
  5561. LLM_NORM_RMS, il);
  5562. cb(cur, "attn_norm", il);
// self-attention
  5564. {
  5565. // compute Q and K and RoPE them
  5566. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5567. cb(Qcur, "Qcur", il);
  5568. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5569. cb(Kcur, "Kcur", il);
  5570. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5571. cb(Vcur, "Vcur", il);
  5572. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5573. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5574. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5575. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
  5576. cb(Qcur, "Qcur_normed", il);
  5577. Qcur = ggml_rope_ext(
  5578. ctx0, Qcur, inp_pos, nullptr,
  5579. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5580. ext_factor, attn_factor, beta_fast, beta_slow
  5581. );
  5582. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
  5583. cb(Kcur, "Kcur_normed", il);
  5584. Kcur = ggml_rope_ext(
  5585. ctx0, Kcur, inp_pos, nullptr,
  5586. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5587. ext_factor, attn_factor, beta_fast, beta_slow
  5588. );
  5589. cb(Qcur, "Qcur", il);
  5590. cb(Kcur, "Kcur", il);
  5591. cb(Vcur, "Vcur", il);
  5592. cur = build_attn(inp_attn, gf,
  5593. model.layers[il].wo, model.layers[il].bo,
  5594. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5595. }
  5596. if (il == n_layer - 1) {
  5597. // skip computing output for unused tokens
  5598. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5599. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5600. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  5601. }
  5602. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5603. cb(ffn_inp, "ffn_inp", il);
  5604. // MoE branch
  5605. cur = build_norm(ffn_inp,
  5606. model.layers[il].ffn_norm, NULL,
  5607. LLM_NORM_RMS, il);
  5608. cb(cur, "ffn_norm", il);
  5609. ggml_tensor * moe_out =
  5610. build_moe_ffn(cur,
  5611. model.layers[il].ffn_gate_inp,
  5612. model.layers[il].ffn_up_exps,
  5613. model.layers[il].ffn_gate_exps,
  5614. model.layers[il].ffn_down_exps,
  5615. nullptr,
  5616. n_expert, n_expert_used,
  5617. LLM_FFN_SILU, true,
  5618. false, 0.0,
  5619. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  5620. il);
  5621. cb(moe_out, "ffn_moe_out", il);
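// note: compared to the shared-expert MoE graph above, the routed expert weights are renormalized after top-k selection (the boolean after LLM_FFN_SILU) and no shared expert is added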
  5622. cur = moe_out;
  5623. cur = ggml_add(ctx0, cur, ffn_inp);
  5624. cur = build_cvec(cur, il);
  5625. cb(cur, "l_out", il);
  5626. // input for next layer
  5627. inpL = cur;
  5628. }
  5629. cur = inpL;
  5630. cur = build_norm(cur,
  5631. model.output_norm, NULL,
  5632. LLM_NORM_RMS, -1);
  5633. cb(cur, "result_norm", -1);
  5634. res->t_embd = cur;
  5635. // lm_head
  5636. cur = build_lora_mm(model.output, cur);
  5637. cb(cur, "result_output", -1);
  5638. res->t_logits = cur;
  5639. ggml_build_forward_expand(gf, cur);
  5640. }
  5641. };
  5642. struct llm_build_phi2 : public llm_graph_context {
  5643. llm_build_phi2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5644. const int64_t n_embd_head = hparams.n_embd_head_v;
  5645. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5646. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5647. ggml_tensor * cur;
  5648. ggml_tensor * attn_norm_output;
  5649. ggml_tensor * ffn_output;
  5650. ggml_tensor * inpL;
  5651. inpL = build_inp_embd(model.tok_embd);
  5652. // inp_pos - contains the positions
  5653. ggml_tensor * inp_pos = build_inp_pos();
  5654. auto * inp_attn = build_attn_inp_kv_unified();
  5655. for (int il = 0; il < n_layer; ++il) {
  5656. attn_norm_output = build_norm(inpL,
  5657. model.layers[il].attn_norm,
  5658. model.layers[il].attn_norm_b,
  5659. LLM_NORM, il);
  5660. cb(attn_norm_output, "attn_norm", il);
  5661. // self-attention
  5662. {
  5663. ggml_tensor * Qcur = nullptr;
  5664. ggml_tensor * Kcur = nullptr;
  5665. ggml_tensor * Vcur = nullptr;
  5666. if (model.layers[il].wqkv) {
  5667. cur = build_lora_mm(model.layers[il].wqkv, attn_norm_output);
  5668. cb(cur, "wqkv", il);
  5669. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5670. cb(cur, "bqkv", il);
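// the fused wqkv output is laid out as [Q | K | V]; the views below split it at offsets of n_embd and n_embd + n_embd_gqa floats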
  5671. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5672. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5673. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5674. } else {
  5675. Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq);
  5676. Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk);
  5677. Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv);
  5678. }
  5679. cb(Qcur, "Qcur", il);
  5680. cb(Kcur, "Kcur", il);
  5681. cb(Vcur, "Vcur", il);
  5682. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5683. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5684. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5685. Qcur = ggml_rope_ext(
  5686. ctx0, Qcur, inp_pos, nullptr,
  5687. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5688. ext_factor, attn_factor, beta_fast, beta_slow
  5689. );
  5690. Kcur = ggml_rope_ext(
  5691. ctx0, Kcur, inp_pos, nullptr,
  5692. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5693. ext_factor, attn_factor, beta_fast, beta_slow
  5694. );
  5695. cb(Qcur, "Qcur", il);
  5696. cb(Kcur, "Kcur", il);
  5697. cb(Vcur, "Vcur", il);
  5698. // with phi2, we scale the Q to avoid precision issues
  5699. // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
  5700. Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
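// pre-scaling Q and passing kq_scale = 1.0f below gives the same result as the usual 1/sqrt(n_embd_head) attention scale, but avoids large intermediate QK^T values in low precision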
  5701. cur = build_attn(inp_attn, gf,
  5702. model.layers[il].wo, model.layers[il].bo,
  5703. Qcur, Kcur, Vcur, nullptr, 1.0f, il);
  5704. }
  5705. if (il == n_layer - 1) {
  5706. // skip computing output for unused tokens
  5707. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5708. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5709. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  5710. attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids);
  5711. }
  5712. // FF
  5713. {
  5714. ffn_output = build_ffn(attn_norm_output,
  5715. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  5716. NULL, NULL, NULL,
  5717. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  5718. NULL,
  5719. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  5720. cb(ffn_output, "ffn_out", il);
  5721. }
  5722. cur = ggml_add(ctx0, cur, ffn_output);
  5723. cur = ggml_add(ctx0, cur, inpL);
  5724. cur = build_cvec(cur, il);
  5725. cb(cur, "l_out", il);
  5726. // input for next layer
  5727. inpL = cur;
  5728. }
  5729. cur = build_norm(inpL,
  5730. model.output_norm,
  5731. model.output_norm_b,
  5732. LLM_NORM, -1);
  5733. cb(cur, "result_norm", -1);
  5734. res->t_embd = cur;
  5735. cur = build_lora_mm(model.output, cur);
  5736. cb(cur, "result_output_no_bias", -1);
  5737. cur = ggml_add(ctx0, cur, model.output_b);
  5738. cb(cur, "result_output", -1);
  5739. res->t_logits = cur;
  5740. ggml_build_forward_expand(gf, cur);
  5741. }
  5742. };
  5743. struct llm_build_phi3 : public llm_graph_context {
  5744. llm_build_phi3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5745. const int64_t n_embd_head = hparams.n_embd_head_v;
  5746. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5747. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5748. ggml_tensor * cur;
  5749. ggml_tensor * inpL;
  5750. inpL = build_inp_embd(model.tok_embd);
  5751. // inp_pos - contains the positions
  5752. ggml_tensor * inp_pos = build_inp_pos();
  5753. auto * inp_attn = build_attn_inp_kv_unified();
  5754. for (int il = 0; il < n_layer; ++il) {
  5755. auto * residual = inpL;
  5756. // self-attention
  5757. {
  5758. // rope freq factors for 128k context
  5759. ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
ggml_tensor * attn_norm_output = build_norm(inpL,
  5761. model.layers[il].attn_norm,
  5762. model.layers[il].attn_norm_b,
  5763. LLM_NORM_RMS, il);
  5764. cb(attn_norm_output, "attn_norm", il);
  5765. ggml_tensor * Qcur = nullptr;
  5766. ggml_tensor * Kcur = nullptr;
  5767. ggml_tensor * Vcur = nullptr;
  5768. if (model.layers[il].wqkv) {
  5769. cur = build_lora_mm(model.layers[il].wqkv, attn_norm_output);
  5770. cb(cur, "wqkv", il);
  5771. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd)));
  5772. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd)));
  5773. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa)));
  5774. } else {
  5775. Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq);
  5776. Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk);
  5777. Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv);
  5778. }
  5779. cb(Qcur, "Qcur", il);
  5780. cb(Kcur, "Kcur", il);
  5781. cb(Vcur, "Vcur", il);
  5782. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5783. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5784. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5785. Qcur = ggml_rope_ext(
  5786. ctx0, Qcur, inp_pos, rope_factors,
  5787. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5788. ext_factor, attn_factor, beta_fast, beta_slow
  5789. );
  5790. Kcur = ggml_rope_ext(
  5791. ctx0, Kcur, inp_pos, rope_factors,
  5792. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  5793. ext_factor, attn_factor, beta_fast, beta_slow
  5794. );
  5795. cb(Qcur, "Qcur", il);
  5796. cb(Kcur, "Kcur", il);
  5797. cb(Vcur, "Vcur", il);
  5798. Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head)));
  5799. cb(Qcur, "Qcur", il);
  5800. cur = build_attn(inp_attn, gf,
  5801. model.layers[il].wo, model.layers[il].bo,
  5802. Qcur, Kcur, Vcur, nullptr, 1.0f, il);
  5803. }
  5804. if (il == n_layer - 1) {
  5805. // skip computing output for unused tokens
ggml_tensor * inp_out_ids = build_inp_out_ids();
  5807. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5808. residual = ggml_get_rows(ctx0, residual, inp_out_ids);
  5809. }
  5810. cur = ggml_add(ctx0, cur, residual);
  5811. residual = cur;
  5812. cur = build_norm(cur,
  5813. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  5814. LLM_NORM_RMS, il);
  5815. cb(cur, "ffn_norm", il);
  5816. // feed-forward network
  5817. if (model.layers[il].ffn_gate_inp == nullptr) {
  5818. cur = build_ffn(cur,
  5819. model.layers[il].ffn_up, NULL, NULL,
  5820. NULL, NULL, NULL,
  5821. model.layers[il].ffn_down, NULL, NULL,
  5822. NULL,
  5823. LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
  5824. cb(cur, "ffn_out", il);
  5825. } else {
  5826. // MoE branch
  5827. cur = build_moe_ffn(cur,
  5828. model.layers[il].ffn_gate_inp,
  5829. model.layers[il].ffn_up_exps,
  5830. model.layers[il].ffn_gate_exps,
  5831. model.layers[il].ffn_down_exps,
  5832. nullptr,
  5833. n_expert, n_expert_used,
  5834. LLM_FFN_SILU, true,
  5835. false, 0.0,
  5836. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  5837. il);
  5838. cb(cur, "ffn_moe_out", il);
  5839. }
  5840. cur = ggml_add(ctx0, residual, cur);
  5841. cur = build_cvec(cur, il);
  5842. cb(cur, "l_out", il);
  5843. // input for next layer
  5844. inpL = cur;
  5845. }
  5846. cur = build_norm(inpL,
  5847. model.output_norm,
  5848. model.output_norm_b,
  5849. LLM_NORM_RMS, -1);
  5850. cb(cur, "result_norm", -1);
  5851. res->t_embd = cur;
  5852. cur = build_lora_mm(model.output, cur);
  5853. if (model.output_b != nullptr) {
  5854. cb(cur, "result_output_no_bias", -1);
  5855. cur = ggml_add(ctx0, cur, model.output_b);
  5856. }
  5857. cb(cur, "result_output", -1);
  5858. res->t_logits = cur;
  5859. ggml_build_forward_expand(gf, cur);
  5860. }
  5861. };
  5862. struct llm_build_plamo : public llm_graph_context {
  5863. llm_build_plamo(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5864. const int64_t n_embd_head = hparams.n_embd_head_v;
  5865. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5866. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5867. ggml_tensor * cur;
  5868. ggml_tensor * inpL;
  5869. inpL = build_inp_embd(model.tok_embd);
  5870. // inp_pos - contains the positions
  5871. ggml_tensor * inp_pos = build_inp_pos();
  5872. auto * inp_attn = build_attn_inp_kv_unified();
  5873. for (int il = 0; il < n_layer; ++il) {
  5874. // norm
  5875. cur = build_norm(inpL,
  5876. model.layers[il].attn_norm, NULL,
  5877. LLM_NORM_RMS, il);
  5878. cb(cur, "attn_norm", il);
  5879. ggml_tensor * attention_norm = cur;
  5880. // self-attention
  5881. {
  5882. // compute Q and K and RoPE them
  5883. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  5884. cb(Qcur, "Qcur", il);
  5885. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  5886. cb(Kcur, "Kcur", il);
  5887. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  5888. cb(Vcur, "Vcur", il);
  5889. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5890. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5891. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5892. Qcur = ggml_rope_ext(
  5893. ctx0, Qcur, inp_pos, nullptr,
  5894. n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
  5895. ext_factor, attn_factor, beta_fast, beta_slow
  5896. );
  5897. Kcur = ggml_rope_ext(
  5898. ctx0, Kcur, inp_pos, nullptr,
  5899. n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
  5900. ext_factor, attn_factor, beta_fast, beta_slow
  5901. );
  5902. cb(Qcur, "Qcur", il);
  5903. cb(Kcur, "Kcur", il);
  5904. cb(Vcur, "Vcur", il);
  5905. cur = build_attn(inp_attn, gf,
  5906. model.layers[il].wo, NULL,
  5907. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5908. }
  5909. ggml_tensor * sa_out = cur;
  5910. cur = attention_norm;
  5911. if (il == n_layer - 1) {
  5912. // skip computing output for unused tokens
  5913. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5914. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5915. sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids);
  5916. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  5917. }
  5918. // feed-forward network
  5919. {
  5920. cur = build_ffn(cur,
  5921. model.layers[il].ffn_up, NULL, NULL,
  5922. model.layers[il].ffn_gate, NULL, NULL,
  5923. model.layers[il].ffn_down, NULL, NULL,
  5924. NULL,
  5925. LLM_FFN_SILU, LLM_FFN_PAR, il);
  5926. cb(cur, "ffn_out", il);
  5927. }
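// parallel residual: the FFN was computed from the attention-norm output, and both the attention output (sa_out) and the original layer input (inpL) are added back in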
  5928. cur = ggml_add(ctx0, cur, sa_out);
  5929. cur = ggml_add(ctx0, cur, inpL);
  5930. cur = build_cvec(cur, il);
  5931. cb(cur, "l_out", il);
  5932. // input for next layer
  5933. inpL = cur;
  5934. }
  5935. cur = inpL;
  5936. cur = build_norm(cur,
  5937. model.output_norm, NULL,
  5938. LLM_NORM_RMS, -1);
  5939. cb(cur, "result_norm", -1);
  5940. res->t_embd = cur;
  5941. // lm_head
  5942. cur = build_lora_mm(model.output, cur);
  5943. cb(cur, "result_output", -1);
  5944. res->t_logits = cur;
  5945. ggml_build_forward_expand(gf, cur);
  5946. }
  5947. };
  5948. struct llm_build_gpt2 : public llm_graph_context {
  5949. llm_build_gpt2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  5950. const int64_t n_embd_head = hparams.n_embd_head_v;
  5951. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5952. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5953. ggml_tensor * cur;
  5954. ggml_tensor * pos;
  5955. ggml_tensor * inpL;
  5956. inpL = build_inp_embd(model.tok_embd);
  5957. // inp_pos - contains the positions
  5958. ggml_tensor * inp_pos = build_inp_pos();
  5959. auto * inp_attn = build_attn_inp_kv_unified();
  5960. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  5961. cb(pos, "pos_embd", -1);
  5962. inpL = ggml_add(ctx0, inpL, pos);
  5963. cb(inpL, "inpL", -1);
  5964. for (int il = 0; il < n_layer; ++il) {
  5965. cur = build_norm(inpL,
  5966. model.layers[il].attn_norm,
  5967. model.layers[il].attn_norm_b,
  5968. LLM_NORM, il);
  5969. cb(cur, "attn_norm", il);
  5970. // self-attention
  5971. {
  5972. cur = build_lora_mm(model.layers[il].wqkv, cur);
  5973. cb(cur, "wqkv", il);
  5974. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5975. cb(cur, "bqkv", il);
  5976. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5977. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5978. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5979. cb(Qcur, "Qcur", il);
  5980. cb(Kcur, "Kcur", il);
  5981. cb(Vcur, "Vcur", il);
  5982. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5983. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5984. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  5985. cur = build_attn(inp_attn, gf,
  5986. model.layers[il].wo, model.layers[il].bo,
  5987. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  5988. }
  5989. if (il == n_layer - 1) {
  5990. // skip computing output for unused tokens
  5991. ggml_tensor * inp_out_ids = build_inp_out_ids();
  5992. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  5993. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  5994. }
  5995. // add the input
  5996. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5997. cb(ffn_inp, "ffn_inp", il);
  5998. // FF
  5999. {
  6000. cur = build_norm(ffn_inp,
  6001. model.layers[il].ffn_norm,
  6002. model.layers[il].ffn_norm_b,
  6003. LLM_NORM, il);
  6004. cb(cur, "ffn_norm", il);
  6005. cur = build_ffn(cur,
  6006. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  6007. NULL, NULL, NULL,
  6008. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  6009. NULL,
  6010. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  6011. cb(cur, "ffn_out", il);
  6012. }
  6013. cur = ggml_add(ctx0, cur, ffn_inp);
  6014. cur = build_cvec(cur, il);
  6015. cb(cur, "l_out", il);
  6016. // input for next layer
  6017. inpL = cur;
  6018. }
  6019. cur = build_norm(inpL,
  6020. model.output_norm,
  6021. model.output_norm_b,
  6022. LLM_NORM, -1);
  6023. cb(cur, "result_norm", -1);
  6024. res->t_embd = cur;
  6025. cur = build_lora_mm(model.output, cur);
  6026. cb(cur, "result_output", -1);
  6027. res->t_logits = cur;
  6028. ggml_build_forward_expand(gf, cur);
  6029. }
  6030. };
  6031. struct llm_build_codeshell : public llm_graph_context {
  6032. llm_build_codeshell(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6033. const int64_t n_embd_head = hparams.n_embd_head_v;
  6034. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  6035. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6036. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6037. ggml_tensor * cur;
  6038. ggml_tensor * inpL;
  6039. inpL = build_inp_embd(model.tok_embd);
  6040. // inp_pos - contains the positions
  6041. ggml_tensor * inp_pos = build_inp_pos();
  6042. auto * inp_attn = build_attn_inp_kv_unified();
  6043. for (int il = 0; il < n_layer; ++il) {
  6044. cur = build_norm(inpL,
  6045. model.layers[il].attn_norm,
  6046. model.layers[il].attn_norm_b,
  6047. LLM_NORM, il);
  6048. cb(cur, "attn_norm", il);
  6049. // self-attention
  6050. {
  6051. cur = build_lora_mm(model.layers[il].wqkv, cur);
  6052. cb(cur, "wqkv", il);
  6053. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  6054. cb(cur, "bqkv", il);
  6055. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  6056. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  6057. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  6058. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6059. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6060. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6061. Qcur = ggml_rope_ext(
  6062. ctx0, Qcur, inp_pos, nullptr,
  6063. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6064. ext_factor, attn_factor, beta_fast, beta_slow
  6065. );
  6066. Kcur = ggml_rope_ext(
  6067. ctx0, Kcur, inp_pos, nullptr,
  6068. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6069. ext_factor, attn_factor, beta_fast, beta_slow
  6070. );
  6071. cb(Qcur, "Qcur", il);
  6072. cb(Kcur, "Kcur", il);
  6073. cb(Vcur, "Vcur", il);
  6074. cur = build_attn(inp_attn, gf,
  6075. model.layers[il].wo, model.layers[il].bo,
  6076. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6077. }
  6078. if (il == n_layer - 1) {
  6079. // skip computing output for unused tokens
  6080. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6081. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6082. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6083. }
  6084. // add the input
  6085. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  6086. cb(ffn_inp, "ffn_inp", il);
  6087. // FF
  6088. {
  6089. cur = build_norm(ffn_inp,
  6090. model.layers[il].ffn_norm,
  6091. model.layers[il].ffn_norm_b,
  6092. LLM_NORM, il);
  6093. cb(cur, "ffn_norm", il);
  6094. cur = build_ffn(cur,
  6095. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  6096. NULL, NULL, NULL,
  6097. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  6098. NULL,
  6099. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  6100. cb(cur, "ffn_out", il);
  6101. }
  6102. cur = ggml_add(ctx0, cur, ffn_inp);
  6103. cur = build_cvec(cur, il);
  6104. cb(cur, "l_out", il);
  6105. // input for next layer
  6106. inpL = cur;
  6107. }
  6108. cur = build_norm(inpL,
  6109. model.output_norm,
  6110. model.output_norm_b,
  6111. LLM_NORM, -1);
  6112. cb(cur, "result_norm", -1);
  6113. res->t_embd = cur;
  6114. cur = build_lora_mm(model.output, cur);
  6115. cb(cur, "result_output", -1);
  6116. res->t_logits = cur;
  6117. ggml_build_forward_expand(gf, cur);
  6118. }
  6119. };
  6120. struct llm_build_orion : public llm_graph_context {
  6121. llm_build_orion(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6122. const int64_t n_embd_head = hparams.n_embd_head_v;
  6123. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6124. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6125. ggml_tensor * cur;
  6126. ggml_tensor * inpL;
  6127. inpL = build_inp_embd(model.tok_embd);
  6128. // inp_pos - contains the positions
  6129. ggml_tensor * inp_pos = build_inp_pos();
  6130. auto * inp_attn = build_attn_inp_kv_unified();
  6131. for (int il = 0; il < n_layer; ++il) {
  6132. ggml_tensor * inpSA = inpL;
  6133. // norm
  6134. cur = build_norm(inpL,
  6135. model.layers[il].attn_norm, model.layers[il].attn_norm_b,
  6136. LLM_NORM, il);
  6137. cb(cur, "attn_norm", il);
  6138. // self-attention
  6139. {
  6140. // compute Q and K and RoPE them
  6141. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6142. cb(Qcur, "Qcur", il);
  6143. // if (model.layers[il].bq) {
  6144. // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  6145. // cb(Qcur, "Qcur", il);
  6146. // }
  6147. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6148. cb(Kcur, "Kcur", il);
  6149. // if (model.layers[il].bk) {
  6150. // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  6151. // cb(Kcur, "Kcur", il);
  6152. // }
  6153. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6154. cb(Vcur, "Vcur", il);
  6155. // if (model.layers[il].bv) {
  6156. // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  6157. // cb(Vcur, "Vcur", il);
  6158. // }
  6159. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6160. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6161. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6162. Qcur = ggml_rope_ext(
  6163. ctx0, Qcur, inp_pos, nullptr,
  6164. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6165. ext_factor, attn_factor, beta_fast, beta_slow
  6166. );
  6167. Kcur = ggml_rope_ext(
  6168. ctx0, Kcur, inp_pos, nullptr,
  6169. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6170. ext_factor, attn_factor, beta_fast, beta_slow
  6171. );
  6172. cb(Qcur, "Qcur", il);
  6173. cb(Kcur, "Kcur", il);
  6174. cb(Vcur, "Vcur", il);
  6175. cur = build_attn(inp_attn, gf,
  6176. model.layers[il].wo, NULL,
  6177. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6178. }
  6179. if (il == n_layer - 1) {
  6180. // skip computing output for unused tokens
  6181. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6182. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6183. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6184. }
  6185. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6186. cb(ffn_inp, "ffn_inp", il);
  6187. // feed-forward network
  6188. cur = build_norm(ffn_inp,
  6189. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  6190. LLM_NORM, il);
  6191. cb(cur, "ffn_norm", il);
  6192. cur = build_ffn(cur,
  6193. model.layers[il].ffn_up, NULL, NULL,
  6194. model.layers[il].ffn_gate, NULL, NULL,
  6195. model.layers[il].ffn_down, NULL, NULL,
  6196. NULL,
  6197. LLM_FFN_SILU, LLM_FFN_PAR, il);
  6198. cb(cur, "ffn_out", il);
  6199. cur = ggml_add(ctx0, cur, ffn_inp);
  6200. cur = build_cvec(cur, il);
  6201. cb(cur, "l_out", il);
  6202. // input for next layer
  6203. inpL = cur;
  6204. }
  6205. cur = inpL;
  6206. cur = build_norm(cur,
  6207. model.output_norm, model.output_norm_b,
  6208. LLM_NORM, -1);
  6209. cb(cur, "result_norm", -1);
  6210. res->t_embd = cur;
  6211. // lm_head
  6212. cur = build_lora_mm(model.output, cur);
  6213. cb(cur, "result_output", -1);
  6214. res->t_logits = cur;
  6215. ggml_build_forward_expand(gf, cur);
  6216. }
  6217. };
  6218. struct llm_build_internlm2 : public llm_graph_context {
  6219. llm_build_internlm2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6220. const int64_t n_embd_head = hparams.n_embd_head_v;
  6221. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6222. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6223. ggml_tensor * cur;
  6224. ggml_tensor * inpL;
  6225. inpL = build_inp_embd(model.tok_embd);
  6226. // inp_pos - contains the positions
  6227. ggml_tensor * inp_pos = build_inp_pos();
  6228. auto * inp_attn = build_attn_inp_kv_unified();
  6229. for (int il = 0; il < n_layer; ++il) {
  6230. ggml_tensor * inpSA = inpL;
  6231. // norm
  6232. cur = build_norm(inpL,
  6233. model.layers[il].attn_norm, NULL,
  6234. LLM_NORM_RMS, il);
  6235. cb(cur, "attn_norm", il);
  6236. // self-attention
  6237. {
  6238. // compute Q and K and RoPE them
  6239. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6240. cb(Qcur, "Qcur", il);
  6241. if (model.layers[il].bq) {
  6242. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  6243. cb(Qcur, "Qcur", il);
  6244. }
  6245. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6246. cb(Kcur, "Kcur", il);
  6247. if (model.layers[il].bk) {
  6248. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  6249. cb(Kcur, "Kcur", il);
  6250. }
  6251. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6252. cb(Vcur, "Vcur", il);
  6253. if (model.layers[il].bv) {
  6254. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  6255. cb(Vcur, "Vcur", il);
  6256. }
  6257. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6258. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6259. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6260. Qcur = ggml_rope_ext(
  6261. ctx0, Qcur, inp_pos, nullptr,
  6262. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6263. ext_factor, attn_factor, beta_fast, beta_slow
  6264. );
  6265. Kcur = ggml_rope_ext(
  6266. ctx0, Kcur, inp_pos, nullptr,
  6267. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6268. ext_factor, attn_factor, beta_fast, beta_slow
  6269. );
  6270. cb(Qcur, "Qcur", il);
  6271. cb(Kcur, "Kcur", il);
  6272. cb(Vcur, "Vcur", il);
  6273. cur = build_attn(inp_attn, gf,
  6274. model.layers[il].wo, model.layers[il].bo,
  6275. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6276. }
  6277. if (il == n_layer - 1) {
  6278. // skip computing output for unused tokens
  6279. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6280. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6281. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6282. }
  6283. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6284. cb(ffn_inp, "ffn_inp", il);
  6285. // feed-forward network
  6286. cur = build_norm(ffn_inp,
  6287. model.layers[il].ffn_norm, NULL,
  6288. LLM_NORM_RMS, il);
  6289. cb(cur, "ffn_norm", il);
  6290. cur = build_ffn(cur,
  6291. model.layers[il].ffn_up, NULL, NULL,
  6292. model.layers[il].ffn_gate, NULL, NULL,
  6293. model.layers[il].ffn_down, NULL, NULL,
  6294. NULL,
  6295. LLM_FFN_SILU, LLM_FFN_PAR, il);
  6296. cb(cur, "ffn_out", il);
  6297. cur = ggml_add(ctx0, cur, ffn_inp);
  6298. cur = build_cvec(cur, il);
  6299. cb(cur, "l_out", il);
  6300. // input for next layer
  6301. inpL = cur;
  6302. }
  6303. cur = inpL;
  6304. cur = build_norm(cur,
  6305. model.output_norm, NULL,
  6306. LLM_NORM_RMS, -1);
  6307. cb(cur, "result_norm", -1);
  6308. res->t_embd = cur;
  6309. // lm_head
  6310. cur = build_lora_mm(model.output, cur);
  6311. cb(cur, "result_output", -1);
  6312. res->t_logits = cur;
  6313. ggml_build_forward_expand(gf, cur);
  6314. }
  6315. };
  6316. struct llm_build_minicpm3 : public llm_graph_context {
  6317. llm_build_minicpm3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
// TODO: if the model varies, these parameters need to be read from the model
  6319. const int64_t n_embd_base = 256;
  6320. const float scale_embd = 12.0f;
  6321. const float scale_depth = 1.4f;
  6322. const float kq_scale = 1.0f / sqrtf(float(hparams.n_embd_head_k));
  6323. const uint32_t n_embd_head_qk_rope = hparams.n_rot;
  6324. const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  6325. const uint32_t kv_lora_rank = hparams.n_lora_kv;
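// MLA-style attention: the per-head dimension is split into a RoPE part (n_embd_head_qk_rope) and a no-RoPE part (n_embd_head_qk_nope), and Q/KV are projected through low-rank bottlenecks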
  6326. ggml_tensor * cur;
  6327. ggml_tensor * inpL;
  6328. inpL = build_inp_embd(model.tok_embd);
  6329. // scale the input embeddings
  6330. inpL = ggml_scale(ctx0, inpL, scale_embd);
  6331. cb(inpL, "inp_scaled", -1);
  6332. // inp_pos - contains the positions
  6333. ggml_tensor * inp_pos = build_inp_pos();
  6334. auto * inp_attn = build_attn_inp_kv_unified();
  6335. for (int il = 0; il < n_layer; ++il) {
  6336. ggml_tensor * inpSA = inpL;
  6337. ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
  6338. // norm
  6339. cur = build_norm(inpL,
  6340. model.layers[il].attn_norm, NULL,
  6341. LLM_NORM_RMS, il);
  6342. cb(cur, "attn_norm", il);
// self-attention
  6344. {
  6345. ggml_tensor * q = NULL;
  6346. // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}
  6347. q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
  6348. cb(q, "q", il);
  6349. q = build_norm(q,
  6350. model.layers[il].attn_q_a_norm, NULL,
  6351. LLM_NORM_RMS, il);
  6352. cb(q, "q", il);
  6353. // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}
  6354. q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
  6355. cb(q, "q", il);
  6356. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  6357. ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
  6358. ggml_row_size(q->type, hparams.n_embd_head_k),
  6359. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  6360. 0);
  6361. cb(q_nope, "q_nope", il);
  6362. // and {n_head * n_embd_head_qk_rope, n_tokens}
  6363. ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
  6364. ggml_row_size(q->type, hparams.n_embd_head_k),
  6365. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  6366. ggml_row_size(q->type, n_embd_head_qk_nope));
  6367. cb(q_pe, "q_pe", il);
  6368. // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
ggml_tensor * kv_pe_compressed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
cb(kv_pe_compressed, "kv_pe_compressed", il);
// split into {kv_lora_rank, n_tokens}
ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compressed, kv_lora_rank, n_tokens,
kv_pe_compressed->nb[1],
0);
cb(kv_compressed, "kv_compressed", il);
// and {n_embd_head_qk_rope, n_tokens}
ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compressed, n_embd_head_qk_rope, 1, n_tokens,
kv_pe_compressed->nb[1],
kv_pe_compressed->nb[1],
ggml_row_size(kv_pe_compressed->type, kv_lora_rank));
  6381. cb(k_pe, "k_pe", il);
  6382. // TODO: the CUDA backend used to not support non-cont. (RMS) norm, investigate removing ggml_cont
  6383. kv_compressed = ggml_cont(ctx0, kv_compressed);
  6384. kv_compressed = build_norm(kv_compressed,
  6385. model.layers[il].attn_kv_a_norm, NULL,
  6386. LLM_NORM_RMS, il);
  6387. cb(kv_compressed, "kv_compressed", il);
  6388. // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
  6389. ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
  6390. cb(kv, "kv", il);
  6391. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  6392. ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
  6393. ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
  6394. ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  6395. 0);
  6396. cb(k_nope, "k_nope", il);
  6397. // and {n_head * n_embd_head_v, n_tokens}
  6398. ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
  6399. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  6400. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
  6401. ggml_row_size(kv->type, (n_embd_head_qk_nope)));
  6402. cb(v_states, "v_states", il);
  6403. v_states = ggml_cont(ctx0, v_states);
  6404. cb(v_states, "v_states", il);
  6405. v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
  6406. ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
  6407. 0);
  6408. cb(v_states, "v_states", il);
  6409. q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
  6410. q_pe = ggml_rope_ext(
  6411. ctx0, q_pe, inp_pos, rope_factors,
  6412. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6413. ext_factor, attn_factor, beta_fast, beta_slow
  6414. );
  6415. cb(q_pe, "q_pe", il);
  6416. // shared RoPE key
  6417. k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
  6418. k_pe = ggml_rope_ext(
  6419. ctx0, k_pe, inp_pos, rope_factors,
  6420. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6421. ext_factor, attn_factor, beta_fast, beta_slow
  6422. );
  6423. cb(k_pe, "k_pe", il);
  6424. ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
  6425. cb(q_states, "q_states", il);
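// k_pe holds a single RoPE'd key head; ggml_repeat broadcasts it across all heads before concatenation with k_nope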
  6426. ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
  6427. cb(k_states, "k_states", il);
  6428. cur = build_attn(inp_attn, gf,
  6429. model.layers[il].wo, NULL,
  6430. q_states, k_states, v_states, nullptr, kq_scale, il);
  6431. }
  6432. if (il == n_layer - 1) {
  6433. // skip computing output for unused tokens
  6434. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6435. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6436. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6437. }
  6438. // scale_res - scale the hidden states for residual connection
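// (scale_depth / sqrt(n_layer), so residual contributions stay bounded as depth grows)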
  6439. const float scale_res = scale_depth/sqrtf(float(n_layer));
  6440. cur = ggml_scale(ctx0, cur, scale_res);
  6441. cb(cur, "hidden_scaled", il);
  6442. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6443. cb(ffn_inp, "ffn_inp", il);
  6444. // feed-forward network
  6445. {
  6446. cur = build_norm(ffn_inp,
  6447. model.layers[il].ffn_norm, NULL,
  6448. LLM_NORM_RMS, il);
  6449. cb(cur, "ffn_norm", il);
  6450. cur = build_ffn(cur,
  6451. model.layers[il].ffn_up, NULL, NULL,
  6452. model.layers[il].ffn_gate, NULL, NULL,
  6453. model.layers[il].ffn_down, NULL, NULL,
  6454. NULL,
  6455. LLM_FFN_SILU, LLM_FFN_PAR, il);
  6456. cb(cur, "ffn_out", il);
  6457. }
  6458. // scale the hidden states for residual connection
  6459. cur = ggml_scale(ctx0, cur, scale_res);
  6460. cb(cur, "hidden_scaled_ffn", il);
  6461. cur = ggml_add(ctx0, cur, ffn_inp);
  6462. cur = build_cvec(cur, il);
  6463. cb(cur, "l_out", il);
  6464. // input for next layer
  6465. inpL = cur;
  6466. }
  6467. cur = inpL;
  6468. cur = build_norm(cur,
  6469. model.output_norm, NULL,
  6470. LLM_NORM_RMS, -1);
  6471. cb(cur, "result_norm", -1);
  6472. res->t_embd = cur;
  6473. // lm_head scaling
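// the hidden state is scaled by n_embd_base/n_embd before the output projection (MiniCPM's width-dependent lm_head scaling)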
  6474. const float scale_lmhead = float(n_embd_base)/float(n_embd);
  6475. cur = ggml_scale(ctx0, cur, scale_lmhead);
  6476. cb(cur, "lmhead_scaling", -1);
  6477. // lm_head
  6478. cur = build_lora_mm(model.output, cur);
  6479. cb(cur, "result_output", -1);
  6480. res->t_logits = cur;
  6481. ggml_build_forward_expand(gf, cur);
  6482. }
  6483. };
  6484. struct llm_build_gemma : public llm_graph_context {
  6485. llm_build_gemma(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6486. const int64_t n_embd_head = hparams.n_embd_head_v;
  6487. ggml_tensor * cur;
  6488. ggml_tensor * inpL;
  6489. inpL = build_inp_embd(model.tok_embd);
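// Gemma scales the token embeddings by sqrt(n_embd) before the first layer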
  6490. inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
  6491. cb(inpL, "inp_scaled", -1);
  6492. // inp_pos - contains the positions
  6493. ggml_tensor * inp_pos = build_inp_pos();
  6494. auto * inp_attn = build_attn_inp_kv_unified();
  6495. for (int il = 0; il < n_layer; ++il) {
  6496. // norm
  6497. cur = build_norm(inpL,
  6498. model.layers[il].attn_norm, NULL,
  6499. LLM_NORM_RMS, il);
  6500. cb(cur, "attn_norm", il);
  6501. // self-attention
  6502. {
  6503. // compute Q and K and RoPE them
  6504. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6505. cb(Qcur, "Qcur", il);
  6506. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6507. cb(Kcur, "Kcur", il);
  6508. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6509. cb(Vcur, "Vcur", il);
  6510. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6511. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6512. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6513. Qcur = ggml_rope_ext(
  6514. ctx0, Qcur, inp_pos, nullptr,
  6515. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6516. ext_factor, attn_factor, beta_fast, beta_slow);
  6517. Kcur = ggml_rope_ext(
  6518. ctx0, Kcur, inp_pos, nullptr,
  6519. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6520. ext_factor, attn_factor, beta_fast, beta_slow);
  6521. cb(Qcur, "Qcur", il);
  6522. cb(Kcur, "Kcur", il);
  6523. cb(Vcur, "Vcur", il);
  6524. Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head)));
  6525. cb(Qcur, "Qcur_scaled", il);
  6526. cur = build_attn(inp_attn, gf,
  6527. model.layers[il].wo, NULL,
  6528. Qcur, Kcur, Vcur, nullptr, 1.0f, il);
  6529. }
  6530. if (il == n_layer - 1) {
  6531. // skip computing output for unused tokens
  6532. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6533. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6534. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6535. }
  6536. ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
  6537. cb(sa_out, "sa_out", il);
  6538. cur = build_norm(sa_out,
  6539. model.layers[il].ffn_norm, NULL,
  6540. LLM_NORM_RMS, il);
  6541. cb(cur, "ffn_norm", il);
  6542. // feed-forward network
  6543. {
  6544. cur = build_ffn(cur,
  6545. model.layers[il].ffn_up, NULL, NULL,
  6546. model.layers[il].ffn_gate, NULL, NULL,
  6547. model.layers[il].ffn_down, NULL, NULL,
  6548. NULL,
  6549. LLM_FFN_GELU, LLM_FFN_PAR, il);
  6550. cb(cur, "ffn_out", il);
  6551. }
  6552. cur = ggml_add(ctx0, cur, sa_out);
  6553. cur = build_cvec(cur, il);
  6554. cb(cur, "l_out", il);
  6555. // input for next layer
  6556. inpL = cur;
  6557. }
  6558. cur = inpL;
  6559. cur = build_norm(cur,
  6560. model.output_norm, NULL,
  6561. LLM_NORM_RMS, -1);
  6562. cb(cur, "result_norm", -1);
  6563. res->t_embd = cur;
  6564. // lm_head
  6565. cur = build_lora_mm(model.output, cur);
  6566. cb(cur, "result_output", -1);
  6567. res->t_logits = cur;
  6568. ggml_build_forward_expand(gf, cur);
  6569. }
  6570. };
  6571. struct llm_build_gemma2 : public llm_graph_context {
  6572. llm_build_gemma2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6573. const int64_t n_embd_head = hparams.n_embd_head_k;
  6574. ggml_tensor * cur;
  6575. ggml_tensor * inpL;
  6576. inpL = build_inp_embd(model.tok_embd);
  6577. inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
  6578. cb(inpL, "inp_scaled", -1);
  6579. // inp_pos - contains the positions
  6580. ggml_tensor * inp_pos = build_inp_pos();
  6581. auto * inp_attn = build_attn_inp_kv_unified();
  6582. for (int il = 0; il < n_layer; ++il) {
  6583. // norm
  6584. cur = build_norm(inpL,
  6585. model.layers[il].attn_norm, NULL,
  6586. LLM_NORM_RMS, il);
  6587. cb(cur, "attn_norm", il);
  6588. // self-attention
  6589. {
  6590. // compute Q and K and RoPE them
  6591. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6592. cb(Qcur, "Qcur", il);
  6593. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6594. cb(Kcur, "Kcur", il);
  6595. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6596. cb(Vcur, "Vcur", il);
  6597. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6598. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6599. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6600. Qcur = ggml_rope_ext(
  6601. ctx0, Qcur, inp_pos, nullptr,
  6602. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6603. ext_factor, attn_factor, beta_fast, beta_slow);
  6604. Kcur = ggml_rope_ext(
  6605. ctx0, Kcur, inp_pos, nullptr,
  6606. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6607. ext_factor, attn_factor, beta_fast, beta_slow);
  6608. cb(Qcur, "Qcur", il);
  6609. cb(Kcur, "Kcur", il);
  6610. cb(Vcur, "Vcur", il);
  6611. // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
  6612. switch (model.type) {
  6613. case LLM_TYPE_2B:
  6614. case LLM_TYPE_9B:
  6615. case LLM_TYPE_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); break;
  6616. default: GGML_ABORT("fatal error");
  6617. };
  6618. cb(Qcur, "Qcur_scaled", il);
  6619. cur = build_attn(inp_attn, gf,
  6620. model.layers[il].wo, NULL,
  6621. Qcur, Kcur, Vcur, nullptr, 1.0f, il);
  6622. }
  6623. cur = build_norm(cur,
  6624. model.layers[il].attn_post_norm, NULL,
  6625. LLM_NORM_RMS, il);
  6626. cb(cur, "attn_post_norm", il);
  6627. if (il == n_layer - 1) {
  6628. // skip computing output for unused tokens
  6629. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6630. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6631. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6632. }
  6633. ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
  6634. cb(sa_out, "sa_out", il);
  6635. cur = build_norm(sa_out,
  6636. model.layers[il].ffn_norm, NULL,
  6637. LLM_NORM_RMS, il);
  6638. cb(cur, "ffn_norm", il);
  6639. // feed-forward network
  6640. {
  6641. cur = build_ffn(cur,
  6642. model.layers[il].ffn_up, NULL, NULL,
  6643. model.layers[il].ffn_gate, NULL, NULL,
  6644. model.layers[il].ffn_down, NULL, NULL,
  6645. NULL,
  6646. LLM_FFN_GELU, LLM_FFN_PAR, il);
  6647. cb(cur, "ffn_out", il);
  6648. }
  6649. cur = build_norm(cur,
  6650. model.layers[il].ffn_post_norm, NULL,
  6651. LLM_NORM_RMS, -1);
  6652. cb(cur, "ffn_post_norm", -1);
  6653. cur = ggml_add(ctx0, cur, sa_out);
  6654. cur = build_cvec(cur, il);
  6655. cb(cur, "l_out", il);
  6656. // input for next layer
  6657. inpL = cur;
  6658. }
  6659. cur = inpL;
  6660. cur = build_norm(cur,
  6661. model.output_norm, NULL,
  6662. LLM_NORM_RMS, -1);
  6663. cb(cur, "result_norm", -1);
  6664. res->t_embd = cur;
  6665. // lm_head
  6666. cur = build_lora_mm(model.output, cur);
  6667. // final logit soft-capping
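// soft-capping maps the logits to softcap * tanh(logits / softcap), bounding them to (-softcap, softcap)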
  6668. cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping);
  6669. cur = ggml_tanh(ctx0, cur);
  6670. cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping);
  6671. cb(cur, "result_output", -1);
  6672. res->t_logits = cur;
  6673. ggml_build_forward_expand(gf, cur);
  6674. }
  6675. };
  6676. struct llm_build_gemma3 : public llm_graph_context {
  6677. llm_build_gemma3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6678. const int64_t n_embd_head = hparams.n_embd_head_k;
  6679. ggml_tensor * cur;
  6680. ggml_tensor * inpL;
  6681. inpL = build_inp_embd(model.tok_embd);
// important: do not scale raw (non-token) embedding inputs, i.e. encoded image embeddings
  6683. if (ubatch.token) {
  6684. inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
  6685. cb(inpL, "inp_scaled", -1);
  6686. }
  6687. // inp_pos - contains the positions
  6688. ggml_tensor * inp_pos = build_inp_pos();
  6689. // TODO: is causal == true correct? might need some changes
  6690. auto * inp_attn = build_attn_inp_kv_unified();
  6691. for (int il = 0; il < n_layer; ++il) {
  6692. const bool is_swa = hparams.is_swa(il);
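// sliding-window (SWA) layers use the SWA-specific RoPE base/scale from training; full-attention layers use the runtime context params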
  6693. const float freq_base_l = is_swa ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base;
  6694. const float freq_scale_l = is_swa ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale;
  6695. // norm
  6696. cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
  6697. cb(cur, "attn_norm", il);
  6698. // self-attention
  6699. {
  6700. // compute Q and K and RoPE them
  6701. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6702. cb(Qcur, "Qcur", il);
  6703. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6704. cb(Kcur, "Kcur", il);
  6705. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6706. cb(Vcur, "Vcur", il);
  6707. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6708. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6709. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6710. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
  6711. cb(Qcur, "Qcur_normed", il);
  6712. Qcur = ggml_rope_ext(
  6713. ctx0, Qcur, inp_pos, nullptr,
  6714. n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
  6715. ext_factor, attn_factor, beta_fast, beta_slow);
  6716. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
  6717. cb(Kcur, "Kcur_normed", il);
  6718. Kcur = ggml_rope_ext(
  6719. ctx0, Kcur, inp_pos, nullptr,
  6720. n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
  6721. ext_factor, attn_factor, beta_fast, beta_slow);
  6722. cb(Qcur, "Qcur", il);
  6723. cb(Kcur, "Kcur", il);
  6724. cb(Vcur, "Vcur", il);
  6725. cur = build_attn(inp_attn, gf,
  6726. model.layers[il].wo, NULL,
  6727. Qcur, Kcur, Vcur, nullptr, hparams.f_attention_scale, il);
  6728. }
  6729. cur = build_norm(cur,
  6730. model.layers[il].attn_post_norm, NULL,
  6731. LLM_NORM_RMS, il);
  6732. cb(cur, "attn_post_norm", il);
  6733. if (il == n_layer - 1) {
  6734. // skip computing output for unused tokens
  6735. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6736. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6737. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6738. }
  6739. ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
  6740. cb(sa_out, "sa_out", il);
  6741. cur = build_norm(sa_out,
  6742. model.layers[il].ffn_norm, NULL,
  6743. LLM_NORM_RMS, il);
  6744. cb(cur, "ffn_norm", il);
  6745. // feed-forward network
  6746. {
  6747. cur = build_ffn(cur,
  6748. model.layers[il].ffn_up, NULL, NULL,
  6749. model.layers[il].ffn_gate, NULL, NULL,
  6750. model.layers[il].ffn_down, NULL, NULL,
  6751. NULL,
  6752. LLM_FFN_GELU, LLM_FFN_PAR, il);
  6753. cb(cur, "ffn_out", il);
  6754. }
  6755. cur = build_norm(cur,
  6756. model.layers[il].ffn_post_norm, NULL,
  6757. LLM_NORM_RMS, -1);
  6758. cb(cur, "ffn_post_norm", -1);
  6759. cur = ggml_add(ctx0, cur, sa_out);
  6760. cur = build_cvec(cur, il);
  6761. cb(cur, "l_out", il);
  6762. // input for next layer
  6763. inpL = cur;
  6764. }
  6765. cur = inpL;
  6766. cur = build_norm(cur,
  6767. model.output_norm, NULL,
  6768. LLM_NORM_RMS, -1);
  6769. cb(cur, "result_norm", -1);
  6770. res->t_embd = cur;
  6771. // lm_head
  6772. cur = build_lora_mm(model.output, cur);
  6773. cb(cur, "result_output", -1);
  6774. res->t_logits = cur;
  6775. ggml_build_forward_expand(gf, cur);
  6776. }
  6777. };
  6778. // TODO: move up next to build_starcoder
  6779. struct llm_build_starcoder2 : public llm_graph_context {
  6780. llm_build_starcoder2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  6781. const int64_t n_embd_head = hparams.n_embd_head_v;
  6782. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6783. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6784. ggml_tensor * cur;
  6785. ggml_tensor * inpL;
  6786. inpL = build_inp_embd(model.tok_embd);
  6787. // inp_pos - contains the positions
  6788. ggml_tensor * inp_pos = build_inp_pos();
  6789. auto * inp_attn = build_attn_inp_kv_unified();
  6790. for (int il = 0; il < n_layer; ++il) {
  6791. ggml_tensor * inpSA = inpL;
  6792. // norm
  6793. cur = build_norm(inpL,
  6794. model.layers[il].attn_norm, model.layers[il].attn_norm_b,
  6795. LLM_NORM, il);
  6796. cb(cur, "attn_norm", il);
  6797. // self-attention
  6798. {
  6799. // compute Q and K and RoPE them
  6800. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  6801. cb(Qcur, "Qcur", il);
  6802. if (model.layers[il].bq) {
  6803. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  6804. cb(Qcur, "Qcur", il);
  6805. }
  6806. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  6807. cb(Kcur, "Kcur", il);
  6808. if (model.layers[il].bk) {
  6809. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  6810. cb(Kcur, "Kcur", il);
  6811. }
  6812. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  6813. cb(Vcur, "Vcur", il);
  6814. if (model.layers[il].bv) {
  6815. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  6816. cb(Vcur, "Vcur", il);
  6817. }
  6818. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6819. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6820. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  6821. Qcur = ggml_rope_ext(
  6822. ctx0, Qcur, inp_pos, nullptr,
  6823. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6824. ext_factor, attn_factor, beta_fast, beta_slow
  6825. );
  6826. Kcur = ggml_rope_ext(
  6827. ctx0, Kcur, inp_pos, nullptr,
  6828. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6829. ext_factor, attn_factor, beta_fast, beta_slow
  6830. );
  6831. cb(Qcur, "Qcur", il);
  6832. cb(Kcur, "Kcur", il);
  6833. cb(Vcur, "Vcur", il);
  6834. cur = build_attn(inp_attn, gf,
  6835. model.layers[il].wo, model.layers[il].bo,
  6836. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  6837. }
  6838. if (il == n_layer - 1) {
  6839. // skip computing output for unused tokens
  6840. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6841. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6842. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6843. }
  6844. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6845. cb(ffn_inp, "ffn_inp", il);
  6846. // feed-forward network
  6847. cur = build_norm(ffn_inp,
  6848. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  6849. LLM_NORM, il);
  6850. cb(cur, "ffn_norm", il);
  6851. cur = build_ffn(cur,
  6852. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  6853. NULL, NULL, NULL,
  6854. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  6855. NULL,
  6856. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  6857. cb(cur, "ffn_out", il);
  6858. cur = ggml_add(ctx0, cur, ffn_inp);
  6859. cur = build_cvec(cur, il);
  6860. cb(cur, "l_out", il);
  6861. // input for next layer
  6862. inpL = cur;
  6863. }
  6864. cur = inpL;
  6865. cur = build_norm(cur,
  6866. model.output_norm, model.output_norm_b,
  6867. LLM_NORM, -1);
  6868. cb(cur, "result_norm", -1);
  6869. res->t_embd = cur;
  6870. // lm_head
  6871. cur = build_lora_mm(model.output, cur);
  6872. cb(cur, "result_output", -1);
  6873. res->t_logits = cur;
  6874. ggml_build_forward_expand(gf, cur);
  6875. }
  6876. };
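// Mamba: no attention and no positional input; each layer is RMSNorm -> selective-SSM
// mixer (build_mamba_layer) -> residual add. The recurrent conv/SSM states live in the
// unified KV cache slots (k_l / v_l) instead of keys and values.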
  6877. struct llm_build_mamba : public llm_graph_context {
  6878. const llama_model & model;
  6879. llm_build_mamba(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params), model(model) {
  6880. ggml_tensor * cur;
  6881. ggml_tensor * inpL;
  6882. // {n_embd, n_tokens}
  6883. inpL = build_inp_embd(model.tok_embd);
  6884. ggml_tensor * state_copy = build_inp_s_copy();
  6885. ggml_tensor * state_mask = build_inp_s_mask();
  6886. for (int il = 0; il < n_layer; ++il) {
  6887. // norm
  6888. cur = build_norm(inpL,
  6889. model.layers[il].attn_norm, NULL,
  6890. LLM_NORM_RMS, il);
  6891. cb(cur, "attn_norm", il);
  6892. //cur = build_mamba_layer(gf, cur, state_copy, state_mask, il);
  6893. cur = build_mamba_layer(gf, cur, state_copy, state_mask, ubatch, il);
  6894. if (il == n_layer - 1) {
  6895. // skip computing output for unused tokens
  6896. ggml_tensor * inp_out_ids = build_inp_out_ids();
  6897. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6898. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6899. }
  6900. // residual
  6901. cur = ggml_add(ctx0, cur, inpL);
  6902. cur = build_cvec(cur, il);
  6903. cb(cur, "l_out", il);
  6904. // input for next layer
  6905. inpL = cur;
  6906. }
  6907. // final rmsnorm
  6908. cur = build_norm(inpL,
  6909. model.output_norm, NULL,
  6910. LLM_NORM_RMS, -1);
  6911. cb(cur, "result_norm", -1);
  6912. res->t_embd = cur;
  6913. // lm_head
  6914. cur = build_lora_mm(model.output, cur);
  6915. cb(cur, "result_output", -1);
  6916. res->t_logits = cur;
  6917. ggml_build_forward_expand(gf, cur);
  6918. }
  6919. // TODO: split
  6920. ggml_tensor * build_mamba_layer(
  6921. ggml_cgraph * gf,
  6922. ggml_tensor * cur,
  6923. ggml_tensor * state_copy,
  6924. ggml_tensor * state_mask,
  6925. const llama_ubatch & ubatch,
  6926. int il) const {
  6927. const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);
  6928. const auto kv_head = kv_self->head;
  6929. const int64_t d_conv = hparams.ssm_d_conv;
  6930. const int64_t d_inner = hparams.ssm_d_inner;
  6931. const int64_t d_state = hparams.ssm_d_state;
  6932. const int64_t dt_rank = hparams.ssm_dt_rank;
  6933. const int64_t n_seqs = ubatch.n_seqs;
6934. // Some Mamba variants (e.g. FalconMamba) apply RMS norm on the B, C and dt layers
  6935. const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms;
  6936. // Use the same RMS norm as the final layer norm
  6937. const float norm_rms_eps = hparams.f_norm_rms_eps;
  6938. const int64_t n_seq_tokens = ubatch.n_seq_tokens;
  6939. GGML_ASSERT(n_seqs != 0);
  6940. GGML_ASSERT(ubatch.equal_seqs);
  6941. GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
  6942. ggml_tensor * conv_states_all = kv_self->k_l[il];
  6943. ggml_tensor * ssm_states_all = kv_self->v_l[il];
  6944. // (ab)using the KV cache to store the states
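// conv keeps the last (d_conv - 1) columns of the conv input per sequence and is
// reshaped below to {d_conv - 1, d_inner, n_seqs}; ssm keeps the recurrent state,
// reshaped to {d_state, d_inner, n_seqs}.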
  6945. ggml_tensor * conv = build_copy_mask_state(
  6946. gf, conv_states_all, state_copy, state_mask,
  6947. hparams.n_embd_k_s(), n_seqs);
  6948. conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs);
  6949. ggml_tensor * ssm = build_copy_mask_state(
  6950. gf, ssm_states_all, state_copy, state_mask,
  6951. hparams.n_embd_v_s(), n_seqs);
  6952. ssm = ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs);
  6953. // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs}
  6954. cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs);
  6955. // {n_embd, 2*d_inner} @ {n_embd, n_seq_tokens, n_seqs} => {2*d_inner, n_seq_tokens, n_seqs}
  6956. ggml_tensor * xz = build_lora_mm(model.layers[il].ssm_in, cur);
  6957. // split the above in two
  6958. // => {d_inner, n_seq_tokens, n_seqs}
  6959. ggml_tensor * x = ggml_view_3d(ctx0, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], 0);
  6960. ggml_tensor * z = ggml_view_3d(ctx0, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], d_inner*ggml_element_size(xz));
  6961. // conv
  6962. {
  6963. // => {d_conv - 1 + n_seq_tokens, d_inner, n_seqs}
  6964. ggml_tensor * conv_x = ggml_concat(ctx0, conv, ggml_transpose(ctx0, x), 0);
  6965. // copy last (d_conv - 1) columns back into the state cache
  6966. ggml_tensor * last_conv = ggml_view_3d(ctx0, conv_x, d_conv - 1, d_inner, n_seqs, conv_x->nb[1], conv_x->nb[2], n_seq_tokens*(conv_x->nb[0]));
  6967. ggml_build_forward_expand(gf,
  6968. ggml_cpy(ctx0, last_conv,
  6969. ggml_view_1d(ctx0, conv_states_all,
  6970. (d_conv - 1)*(d_inner)*(n_seqs),
  6971. kv_head*(d_conv - 1)*(d_inner)*ggml_element_size(conv_states_all))));
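// i.e. the last (d_conv - 1) columns of conv_x are copied back into conv_states_all at
// element offset kv_head*(d_conv - 1)*d_inner, so the next ubatch of these sequences
// resumes the convolution where this one left off.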
  6972. // 1D convolution
  6973. // The equivalent is to make a self-overlapping view of conv_x
  6974. // over d_conv columns at each stride in the 3rd dimension,
  6975. // then element-wise multiply that with the conv1d weight,
  6976. // then sum the elements of each row,
  6977. // (the last two steps are a dot product over rows (also doable with mul_mat))
  6978. // then permute away the ne[0] dimension,
  6979. // and then you're left with the resulting x tensor.
  6980. // For simultaneous sequences, all sequences need to have the same length.
  6981. x = ggml_ssm_conv(ctx0, conv_x, model.layers[il].ssm_conv1d);
  6982. // bias
  6983. x = ggml_add(ctx0, x, model.layers[il].ssm_conv1d_b);
  6984. x = ggml_silu(ctx0, x);
  6985. }
  6986. // ssm
  6987. {
  6988. // {d_inner, dt_rank + 2*d_state} @ {d_inner, n_seq_tokens, n_seqs} => {dt_rank + 2*d_state, n_seq_tokens, n_seqs}
  6989. ggml_tensor * x_db = build_lora_mm(model.layers[il].ssm_x, x);
  6990. // split
  6991. ggml_tensor * dt = ggml_view_3d(ctx0, x_db, dt_rank, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], 0);
  6992. ggml_tensor * B = ggml_view_3d(ctx0, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*dt_rank);
  6993. ggml_tensor * C = ggml_view_3d(ctx0, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*(dt_rank+d_state));
  6994. // Some Mamba variants (e.g. FalconMamba) apply RMS norm in B, C & Dt layers
  6995. if (ssm_dt_b_c_rms) {
  6996. dt = ggml_rms_norm(ctx0, dt, norm_rms_eps);
  6997. B = ggml_rms_norm(ctx0, B, norm_rms_eps);
  6998. C = ggml_rms_norm(ctx0, C, norm_rms_eps);
  6999. }
  7000. // {dt_rank, d_inner} @ {dt_rank, n_seq_tokens, n_seqs} => {d_inner, n_seq_tokens, n_seqs}
  7001. dt = build_lora_mm(model.layers[il].ssm_dt, dt);
  7002. dt = ggml_add(ctx0, dt, model.layers[il].ssm_dt_b);
  7003. // Custom operator to optimize the parallel associative scan
7004. // as described in Annex D of the Mamba paper.
  7005. // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs}
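// Roughly, per time step (see the Mamba paper): h_t = exp(dt_t * A) * h_{t-1} + dt_t * B_t * x_t
// and y_t = C_t * h_t; the D skip connection and the SiLU(z) gate are applied further below.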
  7006. ggml_tensor * y_ssm = ggml_ssm_scan(ctx0, ssm, x, dt, model.layers[il].ssm_a, B, C);
  7007. // store last states
  7008. ggml_build_forward_expand(gf,
  7009. ggml_cpy(ctx0,
  7010. ggml_view_1d(ctx0, y_ssm, d_state*d_inner*n_seqs, x->nb[3]),
  7011. ggml_view_1d(ctx0, ssm_states_all, d_state*d_inner*n_seqs, kv_head*d_state*d_inner*ggml_element_size(ssm_states_all))));
  7012. ggml_tensor * y = ggml_view_3d(ctx0, y_ssm, d_inner, n_seq_tokens, n_seqs, x->nb[1], x->nb[2], 0);
  7013. // TODO: skip computing output earlier for unused tokens
  7014. // {d_inner, n_seq_tokens, n_seqs} * {d_inner} => {d_inner, n_seq_tokens, n_seqs}
  7015. y = ggml_add(ctx0, y, ggml_mul(ctx0, x, model.layers[il].ssm_d));
  7016. y = ggml_mul(ctx0, y, ggml_silu(ctx0, ggml_cont(ctx0, z)));
  7017. // {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs}
  7018. cur = build_lora_mm(model.layers[il].ssm_out, y);
  7019. }
  7020. // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens}
  7021. cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], n_seq_tokens * n_seqs);
  7022. //cb(cur, "mamba_out", il);
  7023. return cur;
  7024. }
  7025. };
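// Command-R: a single pre-norm (LLM_NORM) feeds the attention and FFN branches in
// parallel, and both branch outputs are added back onto the layer input. Q/K LayerNorm
// is optional, and the logits are scaled by f_logit_scale when it is non-zero.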
  7026. struct llm_build_command_r : public llm_graph_context {
  7027. llm_build_command_r(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7028. const int64_t n_embd_head = hparams.n_embd_head_v;
  7029. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7030. const float f_logit_scale = hparams.f_logit_scale;
  7031. ggml_tensor * cur;
  7032. ggml_tensor * inpL;
  7033. inpL = build_inp_embd(model.tok_embd);
  7034. // inp_pos - contains the positions
  7035. ggml_tensor * inp_pos = build_inp_pos();
  7036. auto * inp_attn = build_attn_inp_kv_unified();
  7037. for (int il = 0; il < n_layer; ++il) {
  7038. // norm
  7039. cur = build_norm(inpL,
  7040. model.layers[il].attn_norm, NULL,
  7041. LLM_NORM, il);
  7042. cb(cur, "attn_norm", il);
  7043. ggml_tensor * ffn_inp = cur;
  7044. // self-attention
  7045. {
  7046. // compute Q and K and RoPE them
  7047. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7048. cb(Qcur, "Qcur", il);
  7049. if (model.layers[il].bq) {
  7050. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7051. cb(Qcur, "Qcur", il);
  7052. }
  7053. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7054. cb(Kcur, "Kcur", il);
  7055. if (model.layers[il].bk) {
  7056. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7057. cb(Kcur, "Kcur", il);
  7058. }
  7059. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7060. cb(Vcur, "Vcur", il);
  7061. if (model.layers[il].bv) {
  7062. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7063. cb(Vcur, "Vcur", il);
  7064. }
  7065. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7066. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7067. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7068. if (model.layers[il].attn_q_norm) {
  7069. Qcur = build_norm(Qcur,
  7070. model.layers[il].attn_q_norm,
  7071. NULL,
  7072. LLM_NORM, il);
  7073. cb(Qcur, "Qcur", il);
  7074. }
  7075. Qcur = ggml_rope_ext(
  7076. ctx0, Qcur, inp_pos, nullptr,
  7077. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7078. ext_factor, attn_factor, beta_fast, beta_slow
  7079. );
  7080. if (model.layers[il].attn_k_norm) {
  7081. Kcur = build_norm(Kcur,
  7082. model.layers[il].attn_k_norm,
  7083. NULL,
  7084. LLM_NORM, il);
  7085. cb(Kcur, "Kcur", il);
  7086. }
  7087. Kcur = ggml_rope_ext(
  7088. ctx0, Kcur, inp_pos, nullptr,
  7089. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7090. ext_factor, attn_factor, beta_fast, beta_slow
  7091. );
  7092. cb(Qcur, "Qcur", il);
  7093. cb(Kcur, "Kcur", il);
  7094. cb(Vcur, "Vcur", il);
  7095. cur = build_attn(inp_attn, gf,
  7096. model.layers[il].wo, model.layers[il].bo,
  7097. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7098. }
  7099. if (il == n_layer - 1) {
  7100. // skip computing output for unused tokens
  7101. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7102. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7103. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7104. ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
  7105. }
  7106. ggml_tensor * attn_out = cur;
  7107. // feed-forward network
  7108. {
  7109. cur = build_ffn(ffn_inp,
  7110. model.layers[il].ffn_up, NULL, NULL,
  7111. model.layers[il].ffn_gate, NULL, NULL,
  7112. model.layers[il].ffn_down, NULL, NULL,
  7113. NULL,
  7114. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7115. cb(cur, "ffn_out", il);
  7116. }
  7117. // add together residual + FFN + self-attention
  7118. cur = ggml_add(ctx0, cur, inpL);
  7119. cur = ggml_add(ctx0, cur, attn_out);
  7120. cur = build_cvec(cur, il);
  7121. cb(cur, "l_out", il);
  7122. // input for next layer
  7123. inpL = cur;
  7124. }
  7125. cur = inpL;
  7126. cur = build_norm(cur,
  7127. model.output_norm, NULL,
  7128. LLM_NORM, -1);
  7129. cb(cur, "result_norm", -1);
  7130. res->t_embd = cur;
  7131. // lm_head
  7132. cur = build_lora_mm(model.output, cur);
  7133. if (f_logit_scale) {
  7134. cur = ggml_scale(ctx0, cur, f_logit_scale);
  7135. }
  7136. cb(cur, "result_output", -1);
  7137. res->t_logits = cur;
  7138. ggml_build_forward_expand(gf, cur);
  7139. }
  7140. };
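// Cohere2: interleaves sliding-window and global-attention layers; RoPE is applied only
// on the sliding-window layers (is_swa), while the global layers keep unrotated Q/K.
// Attention and FFN again run in parallel off a shared pre-norm, as in Command-R above.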
  7141. struct llm_build_cohere2 : public llm_graph_context {
  7142. llm_build_cohere2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7143. const int64_t n_embd_head = hparams.n_embd_head_v;
  7144. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7145. const float f_logit_scale = hparams.f_logit_scale;
  7146. ggml_tensor * cur;
  7147. ggml_tensor * inpL;
  7148. inpL = build_inp_embd(model.tok_embd);
  7149. // inp_pos - contains the positions
  7150. ggml_tensor * inp_pos = build_inp_pos();
  7151. auto * inp_attn = build_attn_inp_kv_unified();
  7152. for (int il = 0; il < n_layer; ++il) {
  7153. const bool is_swa = hparams.is_swa(il);
  7154. // norm
  7155. cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM, il);
  7156. cb(cur, "attn_norm", il);
  7157. ggml_tensor * ffn_inp = cur;
  7158. // self-attention
  7159. {
  7160. // rope freq factors for 128k context
  7161. ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
  7162. // compute Q and K and RoPE them
  7163. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7164. cb(Qcur, "Qcur", il);
  7165. if (model.layers[il].bq) {
  7166. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7167. cb(Qcur, "Qcur", il);
  7168. }
  7169. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7170. cb(Kcur, "Kcur", il);
  7171. if (model.layers[il].bk) {
  7172. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7173. cb(Kcur, "Kcur", il);
  7174. }
  7175. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7176. cb(Vcur, "Vcur", il);
  7177. if (model.layers[il].bv) {
  7178. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7179. cb(Vcur, "Vcur", il);
  7180. }
  7181. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7182. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7183. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7184. if (is_swa) {
  7185. Qcur = ggml_rope_ext(
  7186. ctx0, Qcur, inp_pos, rope_factors,
  7187. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7188. ext_factor, attn_factor, beta_fast, beta_slow
  7189. );
  7190. Kcur = ggml_rope_ext(
  7191. ctx0, Kcur, inp_pos, rope_factors,
  7192. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7193. ext_factor, attn_factor, beta_fast, beta_slow
  7194. );
  7195. }
  7196. cb(Qcur, "Qcur", il);
  7197. cb(Kcur, "Kcur", il);
  7198. cb(Vcur, "Vcur", il);
  7199. cur = build_attn(inp_attn, gf,
  7200. model.layers[il].wo, model.layers[il].bo,
  7201. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7202. }
  7203. if (il == n_layer - 1) {
  7204. // skip computing output for unused tokens
  7205. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7206. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7207. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7208. ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
  7209. }
  7210. ggml_tensor * attn_out = cur;
  7211. // feed-forward network
  7212. {
  7213. cur = build_ffn(ffn_inp, model.layers[il].ffn_up, NULL, NULL, model.layers[il].ffn_gate,
  7214. NULL, NULL, model.layers[il].ffn_down, NULL, NULL, NULL, LLM_FFN_SILU, LLM_FFN_PAR,
  7215. il);
  7216. cb(cur, "ffn_out", il);
  7217. }
  7218. // add together residual + FFN + self-attention
  7219. cur = ggml_add(ctx0, cur, inpL);
  7220. cur = ggml_add(ctx0, cur, attn_out);
  7221. cur = build_cvec(cur, il);
  7222. cb(cur, "l_out", il);
  7223. // input for next layer
  7224. inpL = cur;
  7225. }
  7226. cur = inpL;
  7227. cur = build_norm(cur, model.output_norm, NULL, LLM_NORM, -1);
  7228. cb(cur, "result_norm", -1);
  7229. res->t_embd = cur;
  7230. // lm_head
  7231. cur = build_lora_mm(model.output, cur);
  7232. if (f_logit_scale) {
  7233. cur = ggml_scale(ctx0, cur, f_logit_scale);
  7234. }
  7235. cb(cur, "result_output", -1);
  7236. res->t_logits = cur;
  7237. ggml_build_forward_expand(gf, cur);
  7238. }
  7239. };
  7240. // ref: https://allenai.org/olmo
  7241. // based on the original build_llama() function, changes:
  7242. // * non-parametric layer norm
  7243. // * clamp qkv
  7244. // * removed bias
  7245. // * removed MoE
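// Concretely: build_norm() is called with NULL weight/bias (non-parametric LayerNorm),
// and Q/K/V are clamped to [-f_clamp_kqv, f_clamp_kqv] via ggml_clamp() when
// hparams.f_clamp_kqv > 0.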
  7246. struct llm_build_olmo : public llm_graph_context {
  7247. llm_build_olmo(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7248. const int64_t n_embd_head = hparams.n_embd_head_v;
  7249. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7250. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7251. ggml_tensor * cur;
  7252. ggml_tensor * inpL;
  7253. inpL = build_inp_embd(model.tok_embd);
  7254. // inp_pos - contains the positions
  7255. ggml_tensor * inp_pos = build_inp_pos();
  7256. auto * inp_attn = build_attn_inp_kv_unified();
  7257. for (int il = 0; il < n_layer; ++il) {
  7258. ggml_tensor * inpSA = inpL;
  7259. // norm
  7260. cur = build_norm(inpL,
  7261. NULL, NULL,
  7262. LLM_NORM, il);
  7263. cb(cur, "attn_norm", il);
  7264. // self-attention
  7265. {
  7266. // compute Q and K and RoPE them
  7267. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7268. cb(Qcur, "Qcur", il);
  7269. if (hparams.f_clamp_kqv > 0.0f) {
  7270. Qcur = ggml_clamp(ctx0, Qcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  7271. cb(Qcur, "Qcur", il);
  7272. }
  7273. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7274. cb(Kcur, "Kcur", il);
  7275. if (hparams.f_clamp_kqv > 0.0f) {
  7276. Kcur = ggml_clamp(ctx0, Kcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  7277. cb(Kcur, "Kcur", il);
  7278. }
  7279. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7280. cb(Vcur, "Vcur", il);
  7281. if (hparams.f_clamp_kqv > 0.0f) {
  7282. Vcur = ggml_clamp(ctx0, Vcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  7283. cb(Vcur, "Vcur", il);
  7284. }
  7285. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7286. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7287. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7288. Qcur = ggml_rope_ext(
  7289. ctx0, Qcur, inp_pos, nullptr,
  7290. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7291. ext_factor, attn_factor, beta_fast, beta_slow
  7292. );
  7293. Kcur = ggml_rope_ext(
  7294. ctx0, Kcur, inp_pos, nullptr,
  7295. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7296. ext_factor, attn_factor, beta_fast, beta_slow
  7297. );
  7298. cb(Qcur, "Qcur", il);
  7299. cb(Kcur, "Kcur", il);
  7300. cb(Vcur, "Vcur", il);
  7301. cur = build_attn(inp_attn, gf,
  7302. model.layers[il].wo, nullptr,
  7303. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7304. }
  7305. if (il == n_layer - 1) {
  7306. // skip computing output for unused tokens
  7307. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7308. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7309. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7310. }
  7311. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7312. cb(ffn_inp, "ffn_inp", il);
  7313. // feed-forward network
  7314. cur = build_norm(ffn_inp,
  7315. NULL, NULL,
  7316. LLM_NORM, il);
  7317. cb(cur, "ffn_norm", il);
  7318. cur = build_ffn(cur,
  7319. model.layers[il].ffn_up, NULL, NULL,
  7320. model.layers[il].ffn_gate, NULL, NULL,
  7321. model.layers[il].ffn_down, NULL, NULL,
  7322. NULL,
  7323. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7324. cb(cur, "ffn_out", il);
  7325. cur = ggml_add(ctx0, cur, ffn_inp);
  7326. cb(cur, "ffn_out", il);
  7327. cur = build_cvec(cur, il);
  7328. cb(cur, "l_out", il);
  7329. // input for next layer
  7330. inpL = cur;
  7331. }
  7332. cur = inpL;
  7333. cur = build_norm(cur,
  7334. NULL, NULL,
  7335. LLM_NORM, -1);
  7336. cb(cur, "result_norm", -1);
  7337. res->t_embd = cur;
  7338. // lm_head
  7339. cur = build_lora_mm(model.output, cur);
  7340. cb(cur, "result_output", -1);
  7341. res->t_logits = cur;
  7342. ggml_build_forward_expand(gf, cur);
  7343. }
  7344. };
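// OLMo-2: the norms sit on the output side of each branch (attn_post_norm / ffn_post_norm
// applied before the residual add) and Q/K get an RMSNorm before RoPE; there is no
// pre-norm on the layer input or on the FFN input.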
  7345. struct llm_build_olmo2 : public llm_graph_context {
  7346. llm_build_olmo2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7347. const int64_t n_embd_head = hparams.n_embd_head_v;
  7348. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7349. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7350. ggml_tensor * cur;
  7351. ggml_tensor * inpL;
  7352. inpL = build_inp_embd(model.tok_embd);
  7353. // inp_pos - contains the positions
  7354. ggml_tensor * inp_pos = build_inp_pos();
  7355. auto * inp_attn = build_attn_inp_kv_unified();
  7356. for (int il = 0; il < n_layer; ++il) {
  7357. ggml_tensor * inpSA = inpL;
  7358. cur = inpL;
7359. // self-attention
  7360. {
  7361. // compute Q and K and RoPE them
  7362. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7363. cb(Qcur, "Qcur", il);
  7364. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7365. cb(Kcur, "Kcur", il);
  7366. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7367. cb(Vcur, "Vcur", il);
  7368. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL,
  7369. LLM_NORM_RMS, il);
  7370. cb(Qcur, "Qcur_normed", il);
  7371. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL,
  7372. LLM_NORM_RMS, il);
  7373. cb(Kcur, "Kcur_normed", il);
  7374. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7375. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7376. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7377. Qcur = ggml_rope_ext(
  7378. ctx0, Qcur, inp_pos, nullptr,
  7379. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7380. ext_factor, attn_factor, beta_fast, beta_slow
  7381. );
  7382. Kcur = ggml_rope_ext(
  7383. ctx0, Kcur, inp_pos, nullptr,
  7384. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7385. ext_factor, attn_factor, beta_fast, beta_slow
  7386. );
  7387. cb(Qcur, "Qcur", il);
  7388. cb(Kcur, "Kcur", il);
  7389. cb(Vcur, "Vcur", il);
  7390. cur = build_attn(inp_attn, gf,
  7391. model.layers[il].wo, NULL,
  7392. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7393. }
  7394. cur = build_norm(cur,
  7395. model.layers[il].attn_post_norm, NULL,
  7396. LLM_NORM_RMS, il);
  7397. cb(cur, "attn_post_norm", il);
  7398. if (il == n_layer - 1) {
  7399. // skip computing output for unused tokens
  7400. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7401. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7402. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7403. }
  7404. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7405. cb(ffn_inp, "ffn_inp", il);
  7406. // feed-forward network
  7407. cur = build_ffn(ffn_inp,
  7408. model.layers[il].ffn_up, NULL, NULL,
  7409. model.layers[il].ffn_gate, NULL, NULL,
  7410. model.layers[il].ffn_down, NULL, NULL,
  7411. NULL,
  7412. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7413. cb(cur, "ffn_out", il);
  7414. cur = build_norm(cur,
  7415. model.layers[il].ffn_post_norm, NULL,
  7416. LLM_NORM_RMS, -1);
  7417. cb(cur, "ffn_post_norm", -1);
  7418. cur = ggml_add(ctx0, cur, ffn_inp);
  7419. cb(cur, "ffn_out", il);
  7420. cur = build_cvec(cur, il);
  7421. cb(cur, "l_out", il);
  7422. // input for next layer
  7423. inpL = cur;
  7424. }
  7425. cur = inpL;
  7426. cur = build_norm(cur,
  7427. model.output_norm, NULL,
  7428. LLM_NORM_RMS, -1);
  7429. cb(cur, "result_norm", -1);
  7430. res->t_embd = cur;
  7431. // lm_head
  7432. cur = build_lora_mm(model.output, cur);
  7433. cb(cur, "result_output", -1);
  7434. res->t_logits = cur;
  7435. ggml_build_forward_expand(gf, cur);
  7436. }
  7437. };
  7438. // based on the build_qwen2moe() function, changes:
  7439. // * removed shared experts
  7440. // * removed bias
  7441. // * added q, k norm
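// The MoE branch below routes each token to n_expert_used of n_expert SiLU experts with
// softmax gating (ffn_gate_inp) and no shared expert.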
  7442. struct llm_build_olmoe : public llm_graph_context {
  7443. llm_build_olmoe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7444. const int64_t n_embd_head = hparams.n_embd_head_v;
  7445. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7446. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7447. ggml_tensor * cur;
  7448. ggml_tensor * inpL;
  7449. inpL = build_inp_embd(model.tok_embd);
  7450. // inp_pos - contains the positions
  7451. ggml_tensor * inp_pos = build_inp_pos();
  7452. auto * inp_attn = build_attn_inp_kv_unified();
  7453. for (int il = 0; il < n_layer; ++il) {
  7454. ggml_tensor * inpSA = inpL;
  7455. // norm
  7456. cur = build_norm(inpL,
  7457. model.layers[il].attn_norm, NULL,
  7458. LLM_NORM_RMS, il);
  7459. cb(cur, "attn_norm", il);
7460. // self-attention
  7461. {
  7462. // compute Q and K and RoPE them
  7463. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7464. cb(Qcur, "Qcur", il);
  7465. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7466. cb(Kcur, "Kcur", il);
  7467. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7468. cb(Vcur, "Vcur", il);
  7469. Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL,
  7470. LLM_NORM_RMS, il);
  7471. cb(Qcur, "Qcur_normed", il);
  7472. Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL,
  7473. LLM_NORM_RMS, il);
  7474. cb(Kcur, "Kcur_normed", il);
  7475. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7476. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7477. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7478. Qcur = ggml_rope_ext(
  7479. ctx0, Qcur, inp_pos, nullptr,
  7480. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7481. ext_factor, attn_factor, beta_fast, beta_slow
  7482. );
  7483. Kcur = ggml_rope_ext(
  7484. ctx0, Kcur, inp_pos, nullptr,
  7485. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7486. ext_factor, attn_factor, beta_fast, beta_slow
  7487. );
  7488. cb(Qcur, "Qcur", il);
  7489. cb(Kcur, "Kcur", il);
  7490. cb(Vcur, "Vcur", il);
  7491. cur = build_attn(inp_attn, gf,
  7492. model.layers[il].wo, NULL,
  7493. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7494. }
  7495. if (il == n_layer - 1) {
  7496. // skip computing output for unused tokens
  7497. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7498. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7499. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7500. }
  7501. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7502. cb(ffn_inp, "ffn_inp", il);
  7503. // MoE branch
  7504. cur = build_norm(ffn_inp,
  7505. model.layers[il].ffn_norm, NULL,
  7506. LLM_NORM_RMS, il);
  7507. cb(cur, "ffn_norm", il);
  7508. cur = build_moe_ffn(cur,
  7509. model.layers[il].ffn_gate_inp,
  7510. model.layers[il].ffn_up_exps,
  7511. model.layers[il].ffn_gate_exps,
  7512. model.layers[il].ffn_down_exps,
  7513. nullptr,
  7514. n_expert, n_expert_used,
  7515. LLM_FFN_SILU, false,
  7516. false, 0.0,
  7517. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  7518. il);
  7519. cb(cur, "ffn_moe_out", il);
  7520. cur = ggml_add(ctx0, cur, ffn_inp);
  7521. cur = build_cvec(cur, il);
  7522. cb(cur, "l_out", il);
  7523. // input for next layer
  7524. inpL = cur;
  7525. }
  7526. cur = inpL;
  7527. cur = build_norm(cur,
  7528. model.output_norm, NULL,
  7529. LLM_NORM_RMS, -1);
  7530. cb(cur, "result_norm", -1);
  7531. res->t_embd = cur;
  7532. // lm_head
  7533. cur = build_lora_mm(model.output, cur);
  7534. cb(cur, "result_output", -1);
  7535. res->t_logits = cur;
  7536. ggml_build_forward_expand(gf, cur);
  7537. }
  7538. };
  7539. struct llm_build_openelm : public llm_graph_context {
  7540. llm_build_openelm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7541. const int64_t n_embd_head = hparams.n_embd_head_v;
  7542. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7543. ggml_tensor * cur;
  7544. ggml_tensor * inpL;
  7545. inpL = build_inp_embd(model.tok_embd);
  7546. // inp_pos - contains the positions
  7547. ggml_tensor * inp_pos = build_inp_pos();
  7548. auto * inp_attn = build_attn_inp_kv_unified();
  7549. for (int il = 0; il < n_layer; ++il) {
  7550. const int64_t n_head = hparams.n_head(il);
  7551. const int64_t n_head_kv = hparams.n_head_kv(il);
  7552. const int64_t n_head_qkv = 2*n_head_kv + n_head;
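// OpenELM varies n_head / n_head_kv per layer; the fused wqkv output below is reshaped
// to {n_embd_head_k, n_head_qkv, n_tokens} with the Q heads first, then K, then V, and
// the three views slice it along the head dimension.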
  7553. cur = inpL;
  7554. ggml_tensor * residual = cur;
  7555. // norm
  7556. cur = build_norm(inpL,
  7557. model.layers[il].attn_norm, NULL,
  7558. LLM_NORM_RMS, il);
  7559. cb(cur, "attn_norm", il);
  7560. // self-attention
  7561. {
  7562. cur = build_lora_mm(model.layers[il].wqkv, cur);
  7563. cb(cur, "wqkv", il);
  7564. cur = ggml_reshape_3d(ctx0, cur, n_embd_head_k, n_head_qkv, n_tokens);
  7565. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, cur->nb[1], cur->nb[2], 0));
  7566. cb(Qcur, "Qcur", il);
  7567. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*n_head));
  7568. cb(Kcur, "Kcur", il);
  7569. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*(n_head+n_head_kv)));
  7570. cb(Vcur, "Vcur", il);
  7571. Qcur = build_norm(Qcur,
  7572. model.layers[il].attn_q_norm, NULL,
  7573. LLM_NORM_RMS, il);
  7574. cb(Qcur, "Qcur", il);
  7575. Kcur = build_norm(Kcur,
  7576. model.layers[il].attn_k_norm, NULL,
  7577. LLM_NORM_RMS, il);
  7578. cb(Kcur, "Kcur", il);
  7579. Qcur = ggml_rope_ext(
  7580. ctx0, Qcur, inp_pos, NULL,
  7581. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7582. ext_factor, attn_factor, beta_fast, beta_slow
  7583. );
  7584. Kcur = ggml_rope_ext(
  7585. ctx0, Kcur, inp_pos, NULL,
  7586. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7587. ext_factor, attn_factor, beta_fast, beta_slow
  7588. );
  7589. cb(Qcur, "Qcur", il);
  7590. cb(Kcur, "Kcur", il);
7591. cb(Vcur, "Vcur", il);
  7592. cur = build_attn(inp_attn, gf,
  7593. model.layers[il].wo, NULL,
  7594. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7595. }
  7596. if (il == n_layer - 1) {
  7597. // skip computing output for unused tokens
  7598. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7599. residual = ggml_get_rows(ctx0, residual, inp_out_ids);
  7600. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7601. }
  7602. ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
  7603. cb(ffn_inp, "ffn_inp", il);
  7604. // feed-forward network
  7605. {
  7606. cur = build_norm(ffn_inp,
  7607. model.layers[il].ffn_norm, NULL,
  7608. LLM_NORM_RMS, il);
  7609. cb(cur, "ffn_norm", il);
  7610. cur = build_ffn(cur,
  7611. model.layers[il].ffn_up, NULL, NULL,
  7612. model.layers[il].ffn_gate, NULL, NULL,
  7613. model.layers[il].ffn_down, NULL, NULL,
  7614. NULL,
  7615. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7616. cb(cur, "ffn_out", il);
  7617. }
  7618. cur = ggml_add(ctx0, cur, ffn_inp);
  7619. cur = build_cvec(cur, il);
  7620. cb(cur, "l_out", il);
  7621. inpL = cur;
  7622. }
  7623. cur = inpL;
  7624. // norm
  7625. cur = build_norm(cur,
  7626. model.output_norm, NULL,
  7627. LLM_NORM_RMS, -1);
  7628. cb(cur, "result_norm", -1);
  7629. res->t_embd = cur;
  7630. cur = build_lora_mm(model.output, cur);
  7631. cb(cur, "result_output", -1);
  7632. res->t_logits = cur;
  7633. ggml_build_forward_expand(gf, cur);
  7634. }
  7635. };
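// GPT-NeoX: fused QKV projection with bias, rotary Q/K, and either a parallel residual
// (x = x + attn(ln1(x)) + ffn(ln2(x))) or the sequential form, selected per model via
// hparams.use_par_res.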
  7636. struct llm_build_gptneox : public llm_graph_context {
  7637. llm_build_gptneox(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7638. const int64_t n_embd_head = hparams.n_embd_head_v;
  7639. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  7640. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7641. ggml_tensor * cur;
  7642. ggml_tensor * inpL;
  7643. inpL = build_inp_embd(model.tok_embd);
  7644. // inp_pos - contains the positions
  7645. ggml_tensor * inp_pos = build_inp_pos();
  7646. auto * inp_attn = build_attn_inp_kv_unified();
  7647. for (int il = 0; il < n_layer; ++il) {
  7648. cur = build_norm(inpL,
  7649. model.layers[il].attn_norm,
  7650. model.layers[il].attn_norm_b,
  7651. LLM_NORM, il);
  7652. cb(cur, "attn_norm", il);
  7653. // self-attention
  7654. {
  7655. cur = build_lora_mm(model.layers[il].wqkv, cur);
  7656. cb(cur, "wqkv", il);
  7657. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  7658. cb(cur, "bqkv", il);
  7659. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  7660. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  7661. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  7662. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7663. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7664. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7665. Qcur = ggml_rope_ext(
  7666. ctx0, Qcur, inp_pos, nullptr,
  7667. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7668. ext_factor, attn_factor, beta_fast, beta_slow
  7669. );
  7670. Kcur = ggml_rope_ext(
  7671. ctx0, Kcur, inp_pos, nullptr,
  7672. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7673. ext_factor, attn_factor, beta_fast, beta_slow
  7674. );
  7675. cb(Qcur, "Qcur", il);
  7676. cb(Kcur, "Kcur", il);
  7677. cb(Vcur, "Vcur", il);
  7678. cur = build_attn(inp_attn, gf,
  7679. model.layers[il].wo, model.layers[il].bo,
  7680. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7681. }
  7682. if (il == n_layer - 1) {
  7683. // skip computing output for unused tokens
  7684. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7685. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7686. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7687. }
  7688. // ffn
  7689. if (hparams.use_par_res) {
  7690. // attention and ffn are computed in parallel
  7691. // x = x + attn(ln1(x)) + ffn(ln2(x))
  7692. ggml_tensor * attn_out = cur;
  7693. cur = build_norm(inpL,
  7694. model.layers[il].ffn_norm,
  7695. model.layers[il].ffn_norm_b,
  7696. LLM_NORM, il);
  7697. cb(cur, "ffn_norm", il);
  7698. cur = build_ffn(cur,
  7699. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  7700. NULL, NULL, NULL,
  7701. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  7702. NULL,
  7703. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  7704. cb(cur, "ffn_out", il);
  7705. cur = ggml_add(ctx0, cur, inpL);
  7706. cb(cur, "ffn_out", il);
  7707. cur = ggml_add(ctx0, cur, attn_out);
  7708. cur = build_cvec(cur, il);
  7709. cb(cur, "l_out", il);
  7710. // input for next layer
  7711. inpL = cur;
  7712. } else {
  7713. // attention and ffn are computed sequentially
  7714. // x = x + attn(ln1(x))
  7715. // x = x + ffn(ln2(x))
  7716. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  7717. cb(ffn_inp, "ffn_inp", il);
  7718. cur = build_norm(ffn_inp,
  7719. model.layers[il].ffn_norm,
  7720. model.layers[il].ffn_norm_b,
  7721. LLM_NORM, il);
  7722. cb(cur, "ffn_norm", il);
  7723. cur = build_ffn(cur,
  7724. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  7725. NULL, NULL, NULL,
  7726. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  7727. NULL,
  7728. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  7729. cb(cur, "ffn_out", il);
  7730. cur = ggml_add(ctx0, cur, ffn_inp);
  7731. cur = build_cvec(cur, il);
  7732. cb(cur, "l_out", il);
  7733. // input for next layer
  7734. inpL = cur;
  7735. }
  7736. }
  7737. cur = build_norm(inpL,
  7738. model.output_norm,
  7739. model.output_norm_b,
  7740. LLM_NORM, -1);
  7741. cb(cur, "result_norm", -1);
  7742. res->t_embd = cur;
  7743. cur = build_lora_mm(model.output, cur);
  7744. cb(cur, "result_output", -1);
  7745. res->t_logits = cur;
  7746. ggml_build_forward_expand(gf, cur);
  7747. }
  7748. };
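// Arctic: every layer runs a dense SwiGLU FFN on the attention residual path plus a
// separate MoE branch computed from its own norm of the layer input (ffn_norm_exps);
// the two results are summed before feeding the next layer.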
  7749. struct llm_build_arctic : public llm_graph_context {
  7750. llm_build_arctic(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7751. const int64_t n_embd_head = hparams.n_embd_head_v;
  7752. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7753. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7754. ggml_tensor * cur;
  7755. ggml_tensor * inpL;
  7756. inpL = build_inp_embd(model.tok_embd);
  7757. // inp_pos - contains the positions
  7758. ggml_tensor * inp_pos = build_inp_pos();
  7759. auto * inp_attn = build_attn_inp_kv_unified();
  7760. for (int il = 0; il < n_layer; ++il) {
  7761. ggml_tensor * inpSA = inpL;
  7762. // norm
  7763. cur = build_norm(inpL,
  7764. model.layers[il].attn_norm, NULL,
  7765. LLM_NORM_RMS, il);
  7766. cb(cur, "attn_norm", il);
  7767. // self-attention
  7768. {
  7769. // compute Q and K and RoPE them
  7770. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7771. cb(Qcur, "Qcur", il);
  7772. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7773. cb(Kcur, "Kcur", il);
  7774. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7775. cb(Vcur, "Vcur", il);
  7776. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7777. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7778. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7779. Qcur = ggml_rope_ext(
  7780. ctx0, Qcur, inp_pos, nullptr,
  7781. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7782. ext_factor, attn_factor, beta_fast, beta_slow
  7783. );
  7784. Kcur = ggml_rope_ext(
  7785. ctx0, Kcur, inp_pos, nullptr,
  7786. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7787. ext_factor, attn_factor, beta_fast, beta_slow
  7788. );
  7789. cb(Qcur, "Qcur", il);
  7790. cb(Kcur, "Kcur", il);
  7791. cb(Vcur, "Vcur", il);
  7792. cur = build_attn(inp_attn, gf,
  7793. model.layers[il].wo, NULL,
  7794. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  7795. }
  7796. if (il == n_layer - 1) {
  7797. // skip computing output for unused tokens
  7798. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7799. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7800. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7801. }
  7802. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7803. cb(ffn_inp, "ffn_inp", il);
  7804. // feed-forward network
  7805. cur = build_norm(ffn_inp,
  7806. model.layers[il].ffn_norm, NULL,
  7807. LLM_NORM_RMS, il);
  7808. cb(cur, "ffn_norm", il);
  7809. cur = build_ffn(cur,
  7810. model.layers[il].ffn_up, NULL, NULL,
  7811. model.layers[il].ffn_gate, NULL, NULL,
  7812. model.layers[il].ffn_down, NULL, NULL,
  7813. NULL,
  7814. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7815. cb(cur, "ffn_out", il);
  7816. ggml_tensor * ffn_out = ggml_add(ctx0, cur, ffn_inp);
  7817. cb(ffn_out, "ffn_out", il);
  7818. // MoE
  7819. cur = build_norm(inpSA,
  7820. model.layers[il].ffn_norm_exps, NULL,
  7821. LLM_NORM_RMS, il);
  7822. cb(cur, "ffn_norm_exps", il);
  7823. cur = build_moe_ffn(cur,
  7824. model.layers[il].ffn_gate_inp,
  7825. model.layers[il].ffn_up_exps,
  7826. model.layers[il].ffn_gate_exps,
  7827. model.layers[il].ffn_down_exps,
  7828. nullptr,
  7829. n_expert, n_expert_used,
  7830. LLM_FFN_SILU, true,
  7831. false, 0.0,
  7832. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  7833. il);
  7834. cb(cur, "ffn_moe_out", il);
  7835. cur = ggml_add(ctx0, cur, ffn_out);
  7836. cb(cur, "ffn_out", il);
  7837. cur = build_cvec(cur, il);
  7838. cb(cur, "l_out", il);
  7839. // input for next layer
  7840. inpL = cur;
  7841. }
  7842. cur = inpL;
  7843. cur = build_norm(cur,
  7844. model.output_norm, NULL,
  7845. LLM_NORM_RMS, -1);
  7846. cb(cur, "result_norm", -1);
  7847. res->t_embd = cur;
  7848. // lm_head
  7849. cur = build_lora_mm(model.output, cur);
  7850. cb(cur, "result_output", -1);
  7851. res->t_logits = cur;
  7852. ggml_build_forward_expand(gf, cur);
  7853. }
  7854. };
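// DeepSeek (V1): standard multi-head attention with optional rope frequency factors;
// the first n_layer_dense_lead layers use a dense SwiGLU FFN, the remaining layers use
// a MoE whose routed output is added to a shared-expert FFN output.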
  7855. struct llm_build_deepseek : public llm_graph_context {
  7856. llm_build_deepseek(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7857. const int64_t n_embd_head = hparams.n_embd_head_v;
  7858. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7859. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7860. ggml_tensor * cur;
  7861. ggml_tensor * inpL;
  7862. inpL = build_inp_embd(model.tok_embd);
  7863. // inp_pos - contains the positions
  7864. ggml_tensor * inp_pos = build_inp_pos();
  7865. auto * inp_attn = build_attn_inp_kv_unified();
  7866. const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
  7867. for (int il = 0; il < n_layer; ++il) {
  7868. ggml_tensor * inpSA = inpL;
  7869. // norm
  7870. cur = build_norm(inpL,
  7871. model.layers[il].attn_norm, NULL,
  7872. LLM_NORM_RMS, il);
  7873. cb(cur, "attn_norm", il);
  7874. // self-attention
  7875. {
7876. // rope freq factors; may return nullptr if the model does not use them
  7877. ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
  7878. // compute Q and K and RoPE them
  7879. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  7880. cb(Qcur, "Qcur", il);
  7881. if (model.layers[il].bq) {
  7882. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7883. cb(Qcur, "Qcur", il);
  7884. }
  7885. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  7886. cb(Kcur, "Kcur", il);
  7887. if (model.layers[il].bk) {
  7888. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7889. cb(Kcur, "Kcur", il);
  7890. }
  7891. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  7892. cb(Vcur, "Vcur", il);
  7893. if (model.layers[il].bv) {
  7894. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7895. cb(Vcur, "Vcur", il);
  7896. }
  7897. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7898. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7899. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  7900. Qcur = ggml_rope_ext(
  7901. ctx0, Qcur, inp_pos, rope_factors,
  7902. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7903. ext_factor, attn_factor, beta_fast, beta_slow
  7904. );
  7905. Kcur = ggml_rope_ext(
  7906. ctx0, Kcur, inp_pos, rope_factors,
  7907. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7908. ext_factor, attn_factor, beta_fast, beta_slow
  7909. );
  7910. cb(Qcur, "Qcur", il);
  7911. cb(Kcur, "Kcur", il);
  7912. cb(Vcur, "Vcur", il);
  7913. cur = build_attn(inp_attn, gf,
  7914. model.layers[il].wo, model.layers[il].bo,
  7915. Qcur, Kcur, Vcur, nullptr, kq_scale, il);
  7916. }
  7917. if (il == n_layer - 1) {
  7918. // skip computing output for unused tokens
  7919. ggml_tensor * inp_out_ids = build_inp_out_ids();
  7920. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7921. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7922. }
  7923. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7924. cb(ffn_inp, "ffn_inp", il);
  7925. cur = build_norm(ffn_inp,
  7926. model.layers[il].ffn_norm, NULL,
  7927. LLM_NORM_RMS, il);
  7928. cb(cur, "ffn_norm", il);
  7929. if ((uint32_t) il < hparams.n_layer_dense_lead) {
  7930. cur = build_ffn(cur,
  7931. model.layers[il].ffn_up, NULL, NULL,
  7932. model.layers[il].ffn_gate, NULL, NULL,
  7933. model.layers[il].ffn_down, NULL, NULL,
  7934. NULL,
  7935. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7936. cb(cur, "ffn_out", il);
  7937. } else {
  7938. // MoE branch
  7939. ggml_tensor * moe_out =
  7940. build_moe_ffn(cur,
  7941. model.layers[il].ffn_gate_inp,
  7942. model.layers[il].ffn_up_exps,
  7943. model.layers[il].ffn_gate_exps,
  7944. model.layers[il].ffn_down_exps,
  7945. nullptr,
  7946. n_expert, n_expert_used,
  7947. LLM_FFN_SILU, false,
  7948. false, hparams.expert_weights_scale,
  7949. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  7950. il);
  7951. cb(moe_out, "ffn_moe_out", il);
  7952. // FFN shared expert
  7953. {
  7954. ggml_tensor * ffn_shexp = build_ffn(cur,
  7955. model.layers[il].ffn_up_shexp, NULL, NULL,
  7956. model.layers[il].ffn_gate_shexp, NULL, NULL,
  7957. model.layers[il].ffn_down_shexp, NULL, NULL,
  7958. NULL,
  7959. LLM_FFN_SILU, LLM_FFN_PAR, il);
  7960. cb(ffn_shexp, "ffn_shexp", il);
  7961. cur = ggml_add(ctx0, moe_out, ffn_shexp);
  7962. cb(cur, "ffn_out", il);
  7963. }
  7964. }
  7965. cur = ggml_add(ctx0, cur, ffn_inp);
  7966. cur = build_cvec(cur, il);
  7967. cb(cur, "l_out", il);
  7968. // input for next layer
  7969. inpL = cur;
  7970. }
  7971. cur = inpL;
  7972. cur = build_norm(cur,
  7973. model.output_norm, NULL,
  7974. LLM_NORM_RMS, -1);
  7975. cb(cur, "result_norm", -1);
  7976. res->t_embd = cur;
  7977. // lm_head
  7978. cur = build_lora_mm(model.output, cur);
  7979. cb(cur, "result_output", -1);
  7980. res->t_logits = cur;
  7981. ggml_build_forward_expand(gf, cur);
  7982. }
  7983. };
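// DeepSeek2 (multi-head latent attention): Q is optionally factored through the
// low-rank pair wq_a/wq_b (skipped for the lite variant); K/V are compressed by
// wkv_a_mqa into kv_lora_rank dims plus a small n_rot-dim rotary key (k_pe) shared
// across heads, and wkv_b expands the compressed part back into per-head no-PE keys and
// the values. RoPE is applied only to the q_pe / k_pe parts.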
  7984. struct llm_build_deepseek2 : public llm_graph_context {
  7985. llm_build_deepseek2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  7986. bool is_lite = (hparams.n_layer == 27);
  7987. // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.
7988. // See https://github.com/ggerganov/llama.cpp/discussions/7416 for a detailed explanation.
  7989. const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));
  7990. const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(hparams.n_embd_head_k));
  7991. const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));
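// Illustrative numbers (not taken from any model file): with attn_factor = 1.0f,
// freq_scale = 0.25f (4x context extension) and rope_yarn_log_mul = 0.1f this gives
// mscale = 1 + 0.1*ln(4) ~= 1.139 and kq_scale ~= 1.139^2 / sqrtf(n_embd_head_k).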
  7992. const uint32_t n_embd_head_qk_rope = hparams.n_rot;
  7993. const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  7994. const uint32_t kv_lora_rank = hparams.n_lora_kv;
  7995. ggml_tensor * cur;
  7996. ggml_tensor * inpL;
  7997. // {n_embd, n_tokens}
  7998. inpL = build_inp_embd(model.tok_embd);
  7999. // inp_pos - contains the positions
  8000. ggml_tensor * inp_pos = build_inp_pos();
  8001. auto * inp_attn = build_attn_inp_kv_unified();
  8002. for (int il = 0; il < n_layer; ++il) {
  8003. ggml_tensor * inpSA = inpL;
  8004. // norm
  8005. cur = build_norm(inpL,
  8006. model.layers[il].attn_norm, NULL,
  8007. LLM_NORM_RMS, il);
  8008. cb(cur, "attn_norm", il);
8009. // self-attention
  8010. {
  8011. ggml_tensor * q = NULL;
  8012. if (!is_lite) {
  8013. // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}
  8014. q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
  8015. cb(q, "q", il);
  8016. q = build_norm(q,
  8017. model.layers[il].attn_q_a_norm, NULL,
  8018. LLM_NORM_RMS, il);
  8019. cb(q, "q", il);
  8020. // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}
  8021. q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
  8022. cb(q, "q", il);
  8023. } else {
  8024. q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8025. cb(q, "q", il);
  8026. }
  8027. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  8028. ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
  8029. ggml_row_size(q->type, hparams.n_embd_head_k),
  8030. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  8031. 0);
  8032. cb(q_nope, "q_nope", il);
  8033. // and {n_head * n_embd_head_qk_rope, n_tokens}
  8034. ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
  8035. ggml_row_size(q->type, hparams.n_embd_head_k),
  8036. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  8037. ggml_row_size(q->type, n_embd_head_qk_nope));
  8038. cb(q_pe, "q_pe", il);
  8039. // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
8040. ggml_tensor * kv_pe_compressed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
8041. cb(kv_pe_compressed, "kv_pe_compressed", il);
8042. // split into {kv_lora_rank, n_tokens}
8043. ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compressed, kv_lora_rank, n_tokens,
8044. kv_pe_compressed->nb[1],
8045. 0);
8046. cb(kv_compressed, "kv_compressed", il);
8047. // and {n_embd_head_qk_rope, n_tokens}
8048. ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compressed, n_embd_head_qk_rope, 1, n_tokens,
8049. kv_pe_compressed->nb[1],
8050. kv_pe_compressed->nb[1],
8051. ggml_row_size(kv_pe_compressed->type, kv_lora_rank));
  8052. cb(k_pe, "k_pe", il);
  8053. // TODO: the CUDA backend used to not support non-cont. (RMS) norm, investigate removing ggml_cont
  8054. kv_compressed = ggml_cont(ctx0, kv_compressed);
  8055. kv_compressed = build_norm(kv_compressed,
  8056. model.layers[il].attn_kv_a_norm, NULL,
  8057. LLM_NORM_RMS, il);
  8058. cb(kv_compressed, "kv_compressed", il);
  8059. // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
  8060. ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
  8061. cb(kv, "kv", il);
  8062. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  8063. ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
  8064. ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
  8065. ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  8066. 0);
  8067. cb(k_nope, "k_nope", il);
  8068. // and {n_head * n_embd_head_v, n_tokens}
  8069. ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
  8070. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  8071. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
  8072. ggml_row_size(kv->type, (n_embd_head_qk_nope)));
  8073. cb(v_states, "v_states", il);
  8074. v_states = ggml_cont(ctx0, v_states);
  8075. cb(v_states, "v_states", il);
  8076. v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
  8077. ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
  8078. 0);
  8079. cb(v_states, "v_states", il);
  8080. q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
  8081. q_pe = ggml_rope_ext(
  8082. ctx0, q_pe, inp_pos, nullptr,
  8083. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8084. ext_factor, attn_factor_scaled, beta_fast, beta_slow
  8085. );
  8086. cb(q_pe, "q_pe", il);
  8087. // shared RoPE key
  8088. k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
  8089. k_pe = ggml_rope_ext(
  8090. ctx0, k_pe, inp_pos, nullptr,
  8091. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8092. ext_factor, attn_factor_scaled, beta_fast, beta_slow
  8093. );
  8094. cb(k_pe, "k_pe", il);
  8095. ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
  8096. cb(q_states, "q_states", il);
  8097. ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
  8098. cb(k_states, "k_states", il);
  8099. cur = build_attn(inp_attn, gf,
  8100. model.layers[il].wo, NULL,
  8101. q_states, k_states, v_states, nullptr, kq_scale, il);
  8102. }
  8103. if (il == n_layer - 1) {
  8104. // skip computing output for unused tokens
  8105. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8106. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8107. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8108. }
  8109. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8110. cb(ffn_inp, "ffn_inp", il);
  8111. cur = build_norm(ffn_inp,
  8112. model.layers[il].ffn_norm, NULL,
  8113. LLM_NORM_RMS, il);
  8114. cb(cur, "ffn_norm", il);
  8115. if ((uint32_t) il < hparams.n_layer_dense_lead) {
  8116. cur = build_ffn(cur,
  8117. model.layers[il].ffn_up, NULL, NULL,
  8118. model.layers[il].ffn_gate, NULL, NULL,
  8119. model.layers[il].ffn_down, NULL, NULL,
  8120. NULL,
  8121. LLM_FFN_SILU, LLM_FFN_PAR, il);
  8122. cb(cur, "ffn_out", il);
  8123. } else {
  8124. // MoE branch
  8125. ggml_tensor * moe_out =
  8126. build_moe_ffn(cur,
  8127. model.layers[il].ffn_gate_inp,
  8128. model.layers[il].ffn_up_exps,
  8129. model.layers[il].ffn_gate_exps,
  8130. model.layers[il].ffn_down_exps,
  8131. model.layers[il].ffn_exp_probs_b,
  8132. n_expert, n_expert_used,
  8133. LLM_FFN_SILU, hparams.expert_weights_norm,
  8134. true, hparams.expert_weights_scale,
  8135. (llama_expert_gating_func_type) hparams.expert_gating_func,
  8136. il);
  8137. cb(moe_out, "ffn_moe_out", il);
  8138. // FFN shared expert
  8139. {
  8140. ggml_tensor * ffn_shexp = build_ffn(cur,
  8141. model.layers[il].ffn_up_shexp, NULL, NULL,
  8142. model.layers[il].ffn_gate_shexp, NULL, NULL,
  8143. model.layers[il].ffn_down_shexp, NULL, NULL,
  8144. NULL,
  8145. LLM_FFN_SILU, LLM_FFN_PAR, il);
  8146. cb(ffn_shexp, "ffn_shexp", il);
  8147. cur = ggml_add(ctx0, moe_out, ffn_shexp);
  8148. cb(cur, "ffn_out", il);
  8149. }
  8150. }
  8151. cur = ggml_add(ctx0, cur, ffn_inp);
  8152. cur = build_cvec(cur, il);
  8153. cb(cur, "l_out", il);
  8154. // input for next layer
  8155. inpL = cur;
  8156. }
  8157. cur = inpL;
  8158. cur = build_norm(cur,
  8159. model.output_norm, NULL,
  8160. LLM_NORM_RMS, -1);
  8161. cb(cur, "result_norm", -1);
  8162. res->t_embd = cur;
  8163. // lm_head
  8164. cur = ggml_mul_mat(ctx0, model.output, cur);
  8165. cb(cur, "result_output", -1);
  8166. res->t_logits = cur;
  8167. ggml_build_forward_expand(gf, cur);
  8168. }
  8169. };
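// BitNet: the quantized projections carry optional per-tensor scale factors (wq_scale,
// wk_scale, wv_scale, wo_scale, ffn_*_scale) that are multiplied in after each matmul,
// and extra RMS sub-norms (attn_sub_norm, ffn_sub_norm) are applied before the output
// projections wo and ffn_down.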
  8170. struct llm_build_bitnet : public llm_graph_context {
  8171. llm_build_bitnet(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  8172. const int64_t n_embd_head = hparams.n_embd_head_v;
  8173. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8174. ggml_tensor * cur;
  8175. ggml_tensor * inpL;
  8176. inpL = build_inp_embd(model.tok_embd);
  8177. // inp_pos - contains the positions
  8178. ggml_tensor * inp_pos = build_inp_pos();
  8179. auto * inp_attn = build_attn_inp_kv_unified();
  8180. for (int il = 0; il < n_layer; ++il) {
  8181. ggml_tensor * inpSA = inpL;
  8182. cur = build_norm(inpL,
  8183. model.layers[il].attn_norm, NULL,
  8184. LLM_NORM_RMS, il);
  8185. cb(cur, "attn_norm", il);
  8186. // self-attention
  8187. {
  8188. // compute Q and K and RoPE them
  8189. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  8190. if (model.layers[il].wq_scale) {
  8191. Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_scale);
  8192. }
  8193. cb(Qcur, "Qcur", il);
  8194. if (model.layers[il].bq) {
  8195. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8196. cb(Qcur, "Qcur", il);
  8197. }
  8198. // B1.K
  8199. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  8200. if (model.layers[il].wk_scale) {
  8201. Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_scale);
  8202. }
  8203. cb(Kcur, "Kcur", il);
  8204. if (model.layers[il].bk) {
  8205. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8206. cb(Kcur, "Kcur", il);
  8207. }
  8208. // B1.V
  8209. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  8210. if (model.layers[il].wv_scale) {
  8211. Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_scale);
  8212. }
  8213. cb(Vcur, "Vcur", il);
  8214. if (model.layers[il].bv) {
  8215. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8216. cb(Vcur, "Vcur", il);
  8217. }
  8218. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8219. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8220. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  8221. Qcur = ggml_rope_ext(
  8222. ctx0, Qcur, inp_pos, nullptr,
  8223. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8224. ext_factor, attn_factor, beta_fast, beta_slow
  8225. );
  8226. Kcur = ggml_rope_ext(
  8227. ctx0, Kcur, inp_pos, nullptr,
  8228. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8229. ext_factor, attn_factor, beta_fast, beta_slow
  8230. );
  8231. cb(Qcur, "Qcur", il);
  8232. cb(Kcur, "Kcur", il);
  8233. cb(Vcur, "Vcur", il);
  8234. cur = build_attn(inp_attn, gf,
  8235. NULL, NULL,
  8236. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  8237. cur = build_norm(cur,
  8238. model.layers[il].attn_sub_norm, NULL,
  8239. LLM_NORM_RMS, il);
  8240. cb(cur, "attn_sub_norm", il);
  8241. cur = build_lora_mm(model.layers[il].wo, cur);
  8242. if (model.layers[il].wo_scale) {
  8243. cur = ggml_mul(ctx0, cur, model.layers[il].wo_scale);
  8244. }
  8245. if (model.layers[il].bo) {
  8246. cur = ggml_add(ctx0, cur, model.layers[il].bo);
  8247. }
  8248. cb(cur, "attn_o_out", il);
  8249. }
  8250. if (il == n_layer - 1) {
  8251. // skip computing output for unused tokens
  8252. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8253. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8254. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8255. }
  8256. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8257. cb(ffn_inp, "ffn_inp", il);
8258. // feed-forward network
  8259. cur = build_norm(ffn_inp,
  8260. model.layers[il].ffn_norm, NULL,
  8261. LLM_NORM_RMS, il);
  8262. cb(cur, "ffn_norm", il);
  8263. cur = build_ffn(cur,
  8264. model.layers[il].ffn_up, NULL, model.layers[il].ffn_up_scale,
  8265. model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_scale,
  8266. NULL, NULL, NULL,
  8267. NULL,
  8268. LLM_FFN_SILU, LLM_FFN_PAR, il);
  8269. cb(cur, "ffn_sub_out", il);
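// note: build_ffn is called without a down projection here; the gated activation is
// normalized by ffn_sub_norm first and ffn_down (plus its optional scale) is applied
// manually below, mirroring how wo follows attn_sub_norm in the attention block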
  8270. cur = build_norm(cur,
  8271. model.layers[il].ffn_sub_norm, NULL,
  8272. LLM_NORM_RMS, il);
  8273. cb(cur, "ffn_sub_norm", il);
  8274. cur = build_lora_mm(model.layers[il].ffn_down, cur);
  8275. if (model.layers[il].ffn_down_scale) {
  8276. cur = ggml_mul(ctx0, cur, model.layers[il].ffn_down_scale);
  8277. }
  8278. cb(cur, "ffn_down", il);
  8279. cur = ggml_add(ctx0, cur, ffn_inp);
  8280. cb(cur, "l_out", il);
  8281. // input for next layer
  8282. inpL = cur;
  8283. }
  8284. cur = inpL;
  8285. cur = build_norm(cur,
  8286. model.output_norm, NULL,
  8287. LLM_NORM_RMS, -1);
  8288. cb(cur, "result_norm", -1);
  8289. res->t_embd = cur;
  8290. // lm_head
  8291. // FIXME: do not use model.tok_embd directly, duplicate as model.output
  8292. cur = build_lora_mm(model.tok_embd, cur);
  8293. cb(cur, "result_output", -1);
  8294. res->t_logits = cur;
  8295. ggml_build_forward_expand(gf, cur);
  8296. }
  8297. };
  8298. struct llm_build_t5_enc : public llm_graph_context {
  8299. llm_build_t5_enc(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  8300. const int64_t n_embd_head = hparams.n_embd_head_v;
  8301. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8302. ggml_tensor * cur;
  8303. ggml_tensor * inpL;
  8304. inpL = build_inp_embd(model.tok_embd);
  8305. ggml_tensor * pos_bucket_enc = build_inp_pos_bucket_enc();
  8306. auto * inp_attn = build_attn_inp_no_cache();
  8307. for (int il = 0; il < n_layer; ++il) {
  8308. ggml_tensor * inpSA = inpL;
  8309. // norm
  8310. cur = build_norm(inpL,
  8311. model.layers[il].attn_norm_enc, NULL,
  8312. LLM_NORM_RMS, il);
  8313. cb(cur, "attn_norm", il);
  8314. // self-attention
  8315. {
  8316. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq_enc, cur);
  8317. cb(Qcur, "Qcur", il);
  8318. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk_enc, cur);
  8319. cb(Kcur, "Kcur", il);
  8320. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv_enc, cur);
  8321. cb(Vcur, "Vcur", il);
  8322. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8323. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8324. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  8325. ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b_enc ? model.layers[il].attn_rel_b_enc : model.layers[0].attn_rel_b_enc;
  8326. ggml_tensor * kq_b = build_pos_bias(pos_bucket_enc, attn_rel_b);
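// T5 attention is unscaled (kq_scale = 1.0f): instead of dividing by sqrt(n_embd_head),
// a learned relative-position bias (attn_rel_b, falling back to layer 0's tensor when a
// layer has none of its own) is looked up through pos_bucket_enc and added to the KQ
// logits via kq_b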
  8327. cur = build_attn(inp_attn, gf,
  8328. model.layers[il].wo_enc, nullptr,
  8329. Qcur, Kcur, Vcur, kq_b, 1.0f, il);
  8330. cb(cur, "kqv_out", il);
  8331. }
  8332. if (il == n_layer - 1) {
  8333. // skip computing output for unused tokens
  8334. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8335. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8336. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8337. }
  8338. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8339. cb(ffn_inp, "ffn_inp", il);
  8340. // feed-forward network
  8341. {
  8342. cur = build_norm(ffn_inp,
  8343. model.layers[il].ffn_norm_enc, NULL,
  8344. LLM_NORM_RMS, il);
  8345. cb(cur, "ffn_norm", il);
  8346. // T5 uses relu, flan-T5 uses gelu-gated
  8347. cur = build_ffn(cur,
  8348. model.layers[il].ffn_up_enc, NULL, NULL,
  8349. model.layers[il].ffn_gate_enc, NULL, NULL,
  8350. model.layers[il].ffn_down_enc, NULL, NULL,
  8351. NULL,
  8352. model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
  8353. model.layers[il].ffn_gate_enc ? LLM_FFN_PAR : LLM_FFN_SEQ,
  8354. il);
  8355. cb(cur, "ffn_out", il);
  8356. }
  8357. cur = ggml_add(ctx0, cur, ffn_inp);
  8358. cb(cur, "ffn_out", il);
  8359. cur = build_cvec(cur, il);
  8360. cb(cur, "l_out", il);
  8361. // input for next layer
  8362. inpL = cur;
  8363. }
  8364. cur = inpL;
  8365. cb(cur, "result_embd", -1);
  8366. cur = build_norm(cur,
  8367. model.output_norm_enc, NULL,
  8368. LLM_NORM_RMS, -1);
  8369. cb(cur, "result_norm", -1);
  8370. res->t_embd = cur;
  8371. ggml_build_forward_expand(gf, cur);
  8372. }
  8373. };
  8374. struct llm_build_t5_dec : public llm_graph_context {
  8375. llm_build_t5_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  8376. const int64_t n_embd_head = hparams.n_embd_head_v;
  8377. //const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  8378. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8379. ggml_tensor * cur;
  8380. ggml_tensor * inpL;
  8381. inpL = build_inp_embd(model.tok_embd);
  8382. ggml_tensor * embd_enc = build_inp_cross_embd();
  8383. ggml_tensor * pos_bucket_dec = build_inp_pos_bucket_dec();
  8384. const int64_t n_outputs_enc = embd_enc->ne[1];
  8385. auto * inp_attn_self = build_attn_inp_kv_unified();
  8386. auto * inp_attn_cross = build_attn_inp_cross();
  8387. for (int il = 0; il < n_layer; ++il) {
  8388. ggml_tensor * inpSA = inpL;
  8389. // norm
  8390. cur = build_norm(inpL,
  8391. model.layers[il].attn_norm, NULL,
  8392. LLM_NORM_RMS, il);
  8393. cb(cur, "attn_norm", il);
  8394. // self-attention
  8395. {
  8396. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  8397. cb(Qcur, "Qcur", il);
  8398. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  8399. cb(Kcur, "Kcur", il);
  8400. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  8401. cb(Vcur, "Vcur", il);
  8402. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8403. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8404. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  8405. ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? model.layers[il].attn_rel_b : model.layers[0].attn_rel_b;
  8406. ggml_tensor * kq_b = build_pos_bias(pos_bucket_dec, attn_rel_b);
  8407. cur = build_attn(inp_attn_self, gf,
  8408. model.layers[il].wo, model.layers[il].bo,
  8409. Qcur, Kcur, Vcur, kq_b, 1.0f, il);
  8410. cb(cur, "kqv_out", il);
  8411. }
  8412. cur = ggml_add(ctx0, cur, inpSA);
  8413. cb(cur, "cross_inp", il);
  8414. ggml_tensor * inpCA = cur;
  8415. // norm
  8416. cur = build_norm(cur,
  8417. model.layers[il].attn_norm_cross, NULL,
  8418. LLM_NORM_RMS, il);
  8419. cb(cur, "attn_norm_cross", il);
  8420. // cross-attention
  8421. {
  8422. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq_cross, cur);
  8423. cb(Qcur, "Qcur", il);
  8424. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk_cross, embd_enc);
  8425. cb(Kcur, "Kcur", il);
  8426. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv_cross, embd_enc);
  8427. cb(Vcur, "Vcur", il);
  8428. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8429. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_outputs_enc);
  8430. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_outputs_enc);
  8431. cur = build_attn(inp_attn_cross, gf,
  8432. model.layers[il].wo_cross, nullptr,
  8433. Qcur, Kcur, Vcur, nullptr, 1.0f, il);
  8434. cb(cur, "kqv_out", il);
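// cross-attention: queries come from the decoder stream, while keys/values are projected
// from the encoder output embd_enc, so the KV length here is n_outputs_enc rather than
// n_tokens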
  8435. //ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
  8436. //ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));
  8437. //ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
  8438. //cb(kq, "kq", il);
  8439. //kq = ggml_soft_max_ext(ctx0, kq, KQ_mask_cross, 1.0f, hparams.f_max_alibi_bias);
  8440. //cb(kq, "kq_soft_max_ext", il);
  8441. //ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_outputs_enc)));
  8442. //cb(v, "v", il);
  8443. //ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_outputs_enc, n_embd_head, n_head_kv), kq);
  8444. //cb(kqv, "kqv", il);
  8445. //ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
  8446. //cb(kqv_merged, "kqv_merged", il);
  8447. //cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
  8448. //cb(cur, "kqv_merged_cont", il);
  8449. //ggml_build_forward_expand(gf, cur);
  8450. //cur = build_lora_mm(model.layers[il].wo_cross, cur);
  8451. //cb(cur, "kqv_out", il);
  8452. }
  8453. if (il == n_layer - 1) {
  8454. // skip computing output for unused tokens
  8455. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8456. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8457. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8458. inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
  8459. }
  8460. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpCA);
  8461. cb(ffn_inp, "ffn_inp", il);
  8462. // feed-forward network
  8463. {
  8464. cur = build_norm(ffn_inp,
  8465. model.layers[il].ffn_norm, NULL,
  8466. LLM_NORM_RMS, il);
  8467. cb(cur, "ffn_norm", il);
  8468. // T5 uses relu, flan-T5 uses gelu-gated
  8469. cur = build_ffn(cur,
  8470. model.layers[il].ffn_up, NULL, NULL,
  8471. model.layers[il].ffn_gate, NULL, NULL,
  8472. model.layers[il].ffn_down, NULL, NULL,
  8473. NULL,
  8474. model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
  8475. model.layers[il].ffn_gate_enc ? LLM_FFN_PAR : LLM_FFN_SEQ,
  8476. il);
  8477. cb(cur, "ffn_out", il);
  8478. }
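// the activation/gating choice above keys off ffn_gate_enc (the encoder gate tensor);
// this presumably gives the right answer because T5 variants use the same FFN type in
// encoder and decoder (plain ReLU for T5, gated GELU for flan-T5)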
  8479. cur = ggml_add(ctx0, cur, ffn_inp);
  8480. cb(cur, "ffn_out", il);
  8481. cur = build_cvec(cur, il);
  8482. cb(cur, "l_out", il);
  8483. // input for next layer
  8484. inpL = cur;
  8485. }
  8486. cur = inpL;
  8487. cb(cur, "result_embd", -1);
  8488. cur = build_norm(cur,
  8489. model.output_norm, NULL,
  8490. LLM_NORM_RMS, -1);
  8491. cb(cur, "result_norm", -1);
  8492. res->t_embd = cur;
  8493. // lm_head
  8494. cur = build_lora_mm(model.output, cur);
  8495. cb(cur, "result_output", -1);
  8496. res->t_logits = cur;
  8497. ggml_build_forward_expand(gf, cur);
  8498. }
  8499. };
  8500. struct llm_build_jais : public llm_graph_context {
  8501. llm_build_jais(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  8502. const int64_t n_embd_head = hparams.n_embd_head_v;
  8503. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  8504. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8505. ggml_tensor * cur;
  8506. ggml_tensor * inpL;
  8507. inpL = build_inp_embd(model.tok_embd);
  8508. auto * inp_attn = build_attn_inp_kv_unified();
  8509. for (int il = 0; il < n_layer; ++il) {
  8510. cur = build_norm(inpL,
  8511. model.layers[il].attn_norm,
  8512. model.layers[il].attn_norm_b,
  8513. LLM_NORM, il);
  8514. cb(cur, "attn_norm", il);
  8515. // self-attention
  8516. {
  8517. cur = build_lora_mm(model.layers[il].wqkv, cur);
  8518. cb(cur, "wqkv", il);
  8519. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  8520. cb(cur, "bqkv", il);
  8521. ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*cur->nb[0]*(n_embd)));
  8522. ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd)));
  8523. ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd + n_embd_gqa)));
  8524. cb(Qcur, "Qcur", il);
  8525. cb(Kcur, "Kcur", il);
  8526. cb(Vcur, "Vcur", il);
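// the fused wqkv output packs [Q (n_embd) | K (n_embd_gqa) | V (n_embd_gqa)] per row;
// the ggml_view_2d offsets above (multiples of cur->nb[0]) slice those three blocks out
// before they are reshaped into per-head tensors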
  8527. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8528. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8529. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  8530. cur = build_attn(inp_attn, gf,
  8531. model.layers[il].wo, model.layers[il].bo,
  8532. Qcur, Kcur, Vcur, nullptr, 1.0f/float(n_embd_head), il);
  8533. }
  8534. if (il == n_layer - 1) {
  8535. // skip computing output for unused tokens
  8536. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8537. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8538. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  8539. }
  8540. // add the input
  8541. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  8542. cb(ffn_inp, "ffn_inp", il);
  8543. // FF
  8544. {
  8545. cur = build_norm(ffn_inp,
  8546. model.layers[il].ffn_norm,
  8547. model.layers[il].ffn_norm_b,
  8548. LLM_NORM, il);
  8549. cb(cur, "ffn_norm", il);
  8550. cur = build_ffn(cur,
  8551. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  8552. model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
  8553. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  8554. NULL,
  8555. LLM_FFN_SILU, LLM_FFN_PAR, il);
  8556. cb(cur, "ffn_out", il);
  8557. }
  8558. inpL = ggml_add(ctx0, cur, ffn_inp);
  8559. cb(inpL, "l_out", il);
  8560. }
  8561. cur = build_norm(inpL,
  8562. model.output_norm,
  8563. model.output_norm_b,
  8564. LLM_NORM, -1);
  8565. cb(cur, "result_norm", -1);
  8566. res->t_embd = cur;
  8567. cur = build_lora_mm(model.output, cur);
  8568. cb(cur, "result_output", -1);
  8569. res->t_logits = cur;
  8570. ggml_build_forward_expand(gf, cur);
  8571. }
  8572. };
  8573. struct llm_build_chatglm : public llm_graph_context {
  8574. llm_build_chatglm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  8575. const int64_t n_embd_head = hparams.n_embd_head_v;
  8576. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  8577. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8578. ggml_tensor * cur;
  8579. ggml_tensor * inpL;
  8580. inpL = build_inp_embd(model.tok_embd);
  8581. // inp_pos - contains the positions
  8582. ggml_tensor * inp_pos = build_inp_pos();
  8583. auto * inp_attn = build_attn_inp_kv_unified();
  8584. for (int il = 0; il < n_layer; ++il) {
  8585. ggml_tensor * inpSA = inpL;
  8586. cur = build_norm(inpL,
  8587. model.layers[il].attn_norm,
  8588. NULL,
  8589. LLM_NORM_RMS, il);
  8590. cb(cur, "attn_norm", il);
  8591. // self-attention
  8592. {
  8593. ggml_tensor * Qcur = nullptr;
  8594. ggml_tensor * Kcur = nullptr;
  8595. ggml_tensor * Vcur = nullptr;
  8596. if (model.layers[il].wqkv == nullptr) {
  8597. Qcur = build_lora_mm(model.layers[il].wq, cur);
  8598. if (model.layers[il].bq) {
  8599. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8600. }
  8601. Kcur = build_lora_mm(model.layers[il].wk, cur);
  8602. if (model.layers[il].bk) {
  8603. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8604. }
  8605. Vcur = build_lora_mm(model.layers[il].wv, cur);
  8606. if (model.layers[il].bv) {
  8607. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8608. }
  8609. } else {
  8610. cur = build_lora_mm(model.layers[il].wqkv, cur);
  8611. cb(cur, "wqkv", il);
  8612. if (model.layers[il].bqkv) {
  8613. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  8614. cb(cur, "bqkv", il);
  8615. }
  8616. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  8617. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  8618. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  8619. }
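// both code paths (separate wq/wk/wv with optional biases, or a single fused wqkv)
// produce flat Q/K/V rows here, which are then reshaped into per-head tensors and
// RoPE'd below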
  8620. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8621. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8622. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  8623. //printf("freq_base: %f freq_scale: %f ext_factor: %f attn_factor: %f\n", freq_base, freq_scale, ext_factor, attn_factor);
  8624. Qcur = ggml_rope_ext(
  8625. ctx0, Qcur, inp_pos, nullptr,
  8626. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8627. ext_factor, attn_factor, beta_fast, beta_slow
  8628. );
  8629. Kcur = ggml_rope_ext(
  8630. ctx0, Kcur, inp_pos, nullptr,
  8631. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8632. ext_factor, attn_factor, beta_fast, beta_slow
  8633. );
  8634. cb(Qcur, "Qcur", il);
  8635. cb(Kcur, "Kcur", il);
  8636. cb(Vcur, "Vcur", il);
  8637. cur = build_attn(inp_attn, gf,
  8638. model.layers[il].wo, NULL,
  8639. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  8640. }
  8641. if (il == n_layer - 1) {
  8642. // skip computing output for unused tokens
  8643. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8644. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8645. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8646. }
  8647. // Add the input
  8648. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8649. cb(ffn_inp, "ffn_inp", il);
  8650. // FF
  8651. {
  8652. cur = build_norm(ffn_inp,
  8653. model.layers[il].ffn_norm,
  8654. NULL,
  8655. LLM_NORM_RMS, il);
  8656. cb(cur, "ffn_norm", il);
  8657. cur = build_ffn(cur,
  8658. model.layers[il].ffn_up, NULL, NULL,
  8659. NULL, NULL, NULL,
  8660. model.layers[il].ffn_down, NULL, NULL,
  8661. NULL,
  8662. LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
  8663. cb(cur, "ffn_out", il);
  8664. }
  8665. inpL = ggml_add(ctx0, cur, ffn_inp);
  8666. cb(inpL, "l_out", il);
  8667. }
  8668. cur = build_norm(inpL,
  8669. model.output_norm,
  8670. NULL,
  8671. LLM_NORM_RMS, -1);
  8672. cb(cur, "result_norm", -1);
  8673. res->t_embd = cur;
  8674. cur = build_lora_mm(model.output, cur);
  8675. cb(cur, "result_output", -1);
  8676. res->t_logits = cur;
  8677. ggml_build_forward_expand(gf, cur);
  8678. }
  8679. };
  8680. struct llm_build_glm4 : public llm_graph_context {
  8681. llm_build_glm4(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  8682. const int64_t n_embd_head = hparams.n_embd_head_v;
  8683. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  8684. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8685. ggml_tensor * cur;
  8686. ggml_tensor * inpL;
  8687. inpL = build_inp_embd(model.tok_embd);
  8688. // inp_pos - contains the positions
  8689. ggml_tensor * inp_pos = build_inp_pos();
  8690. auto * inp_attn = build_attn_inp_kv_unified();
  8691. for (int il = 0; il < n_layer; ++il) {
  8692. ggml_tensor * inpSA = inpL;
  8693. // Pre-attention norm
  8694. cur = build_norm(inpL,
  8695. model.layers[il].attn_norm,
  8696. NULL,
  8697. LLM_NORM_RMS, il);
  8698. cb(cur, "attn_norm", il);
  8699. // self-attention
  8700. {
  8701. ggml_tensor * Qcur = nullptr;
  8702. ggml_tensor * Kcur = nullptr;
  8703. ggml_tensor * Vcur = nullptr;
  8704. if (model.layers[il].wqkv == nullptr) {
  8705. Qcur = build_lora_mm(model.layers[il].wq, cur);
  8706. if (model.layers[il].bq) {
  8707. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8708. }
  8709. Kcur = build_lora_mm(model.layers[il].wk, cur);
  8710. if (model.layers[il].bk) {
  8711. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8712. }
  8713. Vcur = build_lora_mm(model.layers[il].wv, cur);
  8714. if (model.layers[il].bv) {
  8715. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8716. }
  8717. } else {
  8718. cur = build_lora_mm(model.layers[il].wqkv, cur);
  8719. cb(cur, "wqkv", il);
  8720. if (model.layers[il].bqkv) {
  8721. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  8722. cb(cur, "bqkv", il);
  8723. }
  8724. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  8725. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  8726. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  8727. }
  8728. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8729. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8730. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  8731. Qcur = ggml_rope_ext(
  8732. ctx0, Qcur, inp_pos, nullptr,
  8733. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8734. ext_factor, attn_factor, beta_fast, beta_slow
  8735. );
  8736. Kcur = ggml_rope_ext(
  8737. ctx0, Kcur, inp_pos, nullptr,
  8738. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8739. ext_factor, attn_factor, beta_fast, beta_slow
  8740. );
  8741. cb(Qcur, "Qcur", il);
  8742. cb(Kcur, "Kcur", il);
  8743. cb(Vcur, "Vcur", il);
  8744. cur = build_attn(inp_attn, gf,
  8745. model.layers[il].wo, NULL,
  8746. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  8747. }
  8748. if (il == n_layer - 1) {
  8749. // skip computing output for unused tokens
  8750. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8751. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8752. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8753. }
8754. // Post-attention norm
  8755. cur = build_norm(cur,
  8756. model.layers[il].attn_post_norm,
  8757. NULL,
  8758. LLM_NORM_RMS, il);
  8759. cb(cur, "post_attn_norm", il);
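// GLM-4 uses a sandwich layout: each sub-block is normalized both before (attn_norm /
// ffn_norm) and after (attn_post_norm / ffn_post_norm) its computation, and the residual
// is added only after the post-norm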
  8760. // Add the input (residual connection after post-attention norm)
  8761. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8762. cb(ffn_inp, "ffn_inp", il);
  8763. // FF
  8764. {
  8765. // Pre-MLP norm
  8766. cur = build_norm(ffn_inp,
  8767. model.layers[il].ffn_norm,
  8768. NULL,
  8769. LLM_NORM_RMS, il);
  8770. cb(cur, "ffn_norm", il);
  8771. // MLP
  8772. cur = build_ffn(cur,
  8773. model.layers[il].ffn_up, NULL, NULL,
  8774. NULL, NULL, NULL,
  8775. model.layers[il].ffn_down, NULL, NULL,
  8776. NULL,
  8777. LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
  8778. cb(cur, "ffn_out", il);
  8779. // Post-MLP norm
  8780. cur = build_norm(cur,
  8781. model.layers[il].ffn_post_norm,
  8782. NULL,
  8783. LLM_NORM_RMS, il);
  8784. cb(cur, "post_mlp_norm", il);
  8785. }
  8786. // Add residual connection after post-MLP norm
  8787. inpL = ggml_add(ctx0, cur, ffn_inp);
  8788. cb(inpL, "l_out", il);
  8789. }
  8790. // Final norm
  8791. cur = build_norm(inpL,
  8792. model.output_norm,
  8793. NULL,
  8794. LLM_NORM_RMS, -1);
  8795. cb(cur, "result_norm", -1);
  8796. res->t_embd = cur;
  8797. // Output projection
  8798. cur = build_lora_mm(model.output, cur);
  8799. cb(cur, "result_output", -1);
  8800. res->t_logits = cur;
  8801. ggml_build_forward_expand(gf, cur);
  8802. }
  8803. };
  8804. struct llm_build_nemotron : public llm_graph_context {
  8805. llm_build_nemotron(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  8806. const int64_t n_embd_head = hparams.n_embd_head_v;
  8807. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8808. //GGML_ASSERT(n_embd_head == hparams.n_rot);
  8809. ggml_tensor * cur;
  8810. ggml_tensor * inpL;
  8811. inpL = build_inp_embd(model.tok_embd);
  8812. // inp_pos - contains the positions
  8813. ggml_tensor * inp_pos = build_inp_pos();
  8814. auto * inp_attn = build_attn_inp_kv_unified();
  8815. for (int il = 0; il < n_layer; ++il) {
  8816. ggml_tensor * inpSA = inpL;
  8817. // norm
  8818. cur = build_norm(inpL,
  8819. model.layers[il].attn_norm,
  8820. model.layers[il].attn_norm_b,
  8821. LLM_NORM, il);
  8822. cb(cur, "attn_norm", il);
  8823. // self-attention
  8824. {
  8825. // compute Q and K and RoPE them
  8826. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  8827. cb(Qcur, "Qcur", il);
  8828. if (model.layers[il].bq) {
  8829. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8830. cb(Qcur, "Qcur", il);
  8831. }
  8832. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  8833. cb(Kcur, "Kcur", il);
  8834. if (model.layers[il].bk) {
  8835. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8836. cb(Kcur, "Kcur", il);
  8837. }
  8838. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  8839. cb(Vcur, "Vcur", il);
  8840. if (model.layers[il].bv) {
  8841. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8842. cb(Vcur, "Vcur", il);
  8843. }
  8844. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8845. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8846. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  8847. Qcur = ggml_rope_ext(
  8848. ctx0, Qcur, inp_pos, nullptr,
  8849. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8850. ext_factor, attn_factor, beta_fast, beta_slow
  8851. );
  8852. Kcur = ggml_rope_ext(
  8853. ctx0, Kcur, inp_pos, nullptr,
  8854. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8855. ext_factor, attn_factor, beta_fast, beta_slow
  8856. );
  8857. cb(Qcur, "Qcur", il);
  8858. cb(Kcur, "Kcur", il);
  8859. cb(Vcur, "Vcur", il);
  8860. cur = build_attn(inp_attn, gf,
  8861. model.layers[il].wo, model.layers[il].bo,
  8862. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  8863. }
  8864. if (il == n_layer - 1) {
  8865. // skip computing output for unused tokens
  8866. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8867. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8868. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8869. }
  8870. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8871. cb(ffn_inp, "ffn_inp", il);
  8872. // feed-forward network
  8873. cur = build_norm(ffn_inp,
  8874. model.layers[il].ffn_norm,
  8875. model.layers[il].ffn_norm_b,
  8876. LLM_NORM, il);
  8877. cb(cur, "ffn_norm", il);
  8878. cur = build_ffn(cur,
  8879. model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
  8880. NULL, NULL, NULL,
  8881. model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
  8882. NULL,
  8883. LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il);
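// Nemotron's FFN has no gate projection: a plain up -> squared-ReLU -> down MLP with
// biases (LLM_FFN_RELU_SQR, sequential), and all norms are biased LayerNorms (LLM_NORM)
// rather than RMSNorm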
  8884. cur = ggml_add(ctx0, cur, ffn_inp);
  8885. cb(cur, "ffn_out", il);
  8886. cur = build_cvec(cur, il);
  8887. cb(cur, "l_out", il);
  8888. // input for next layer
  8889. inpL = cur;
  8890. }
  8891. cur = inpL;
  8892. cur = build_norm(cur,
  8893. model.output_norm, model.output_norm_b,
  8894. LLM_NORM, -1);
  8895. cb(cur, "result_norm", -1);
  8896. res->t_embd = cur;
  8897. // lm_head
  8898. cur = build_lora_mm(model.output, cur);
  8899. cb(cur, "result_output", -1);
  8900. res->t_logits = cur;
  8901. ggml_build_forward_expand(gf, cur);
  8902. }
  8903. };
  8904. struct llm_build_exaone : public llm_graph_context {
  8905. llm_build_exaone(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  8906. const int64_t n_embd_head = hparams.n_embd_head_v;
  8907. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8908. GGML_ASSERT(n_embd_head == hparams.n_rot);
  8909. ggml_tensor * cur;
  8910. ggml_tensor * inpL;
  8911. inpL = build_inp_embd(model.tok_embd);
  8912. // inp_pos - contains the positions
  8913. ggml_tensor * inp_pos = build_inp_pos();
  8914. auto * inp_attn = build_attn_inp_kv_unified();
  8915. for (int il = 0; il < n_layer; ++il) {
  8916. ggml_tensor * inpSA = inpL;
  8917. // norm
  8918. cur = build_norm(inpL,
  8919. model.layers[il].attn_norm, NULL,
  8920. LLM_NORM_RMS, il);
  8921. cb(cur, "attn_norm", il);
  8922. // self-attention
  8923. {
  8924. // rope freq factors for llama3; may return nullptr for llama2 and other models
  8925. ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
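// when present, rope_factors rescale the per-dimension RoPE frequencies for long-context
// extension; ggml_rope_ext accepts a nullptr here and then applies plain RoPE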
  8926. // compute Q and K and RoPE them
  8927. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  8928. cb(Qcur, "Qcur", il);
  8929. if (model.layers[il].bq) {
  8930. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8931. cb(Qcur, "Qcur", il);
  8932. }
  8933. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  8934. cb(Kcur, "Kcur", il);
  8935. if (model.layers[il].bk) {
  8936. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8937. cb(Kcur, "Kcur", il);
  8938. }
  8939. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  8940. cb(Vcur, "Vcur", il);
  8941. if (model.layers[il].bv) {
  8942. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8943. cb(Vcur, "Vcur", il);
  8944. }
  8945. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8946. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8947. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  8948. Qcur = ggml_rope_ext(
  8949. ctx0, Qcur, inp_pos, rope_factors,
  8950. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8951. ext_factor, attn_factor, beta_fast, beta_slow
  8952. );
  8953. Kcur = ggml_rope_ext(
  8954. ctx0, Kcur, inp_pos, rope_factors,
  8955. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8956. ext_factor, attn_factor, beta_fast, beta_slow
  8957. );
  8958. cb(Qcur, "Qcur", il);
  8959. cb(Kcur, "Kcur", il);
  8960. cb(Vcur, "Vcur", il);
  8961. cur = build_attn(inp_attn, gf,
  8962. model.layers[il].wo, model.layers[il].bo,
  8963. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  8964. }
  8965. if (il == n_layer - 1) {
  8966. // skip computing output for unused tokens
  8967. ggml_tensor * inp_out_ids = build_inp_out_ids();
  8968. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8969. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8970. }
  8971. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8972. cb(ffn_inp, "ffn_inp", il);
  8973. // feed-forward network
  8974. cur = build_norm(ffn_inp,
  8975. model.layers[il].ffn_norm, NULL,
  8976. LLM_NORM_RMS, il);
  8977. cb(cur, "ffn_norm", il);
  8978. cur = build_ffn(cur,
  8979. model.layers[il].ffn_up, NULL, NULL,
  8980. model.layers[il].ffn_gate, NULL, NULL,
  8981. model.layers[il].ffn_down, NULL, NULL,
  8982. NULL,
  8983. LLM_FFN_SILU, LLM_FFN_PAR, il);
  8984. cb(cur, "ffn_out", il);
  8985. cur = ggml_add(ctx0, cur, ffn_inp);
  8986. cb(cur, "ffn_out", il);
  8987. cur = build_cvec(cur, il);
  8988. cb(cur, "l_out", il);
  8989. // input for next layer
  8990. inpL = cur;
  8991. }
  8992. cur = inpL;
  8993. cur = build_norm(cur,
  8994. model.output_norm, NULL,
  8995. LLM_NORM_RMS, -1);
  8996. cb(cur, "result_norm", -1);
  8997. res->t_embd = cur;
  8998. // lm_head
  8999. cur = build_lora_mm(model.output, cur);
  9000. cb(cur, "result_output", -1);
  9001. res->t_logits = cur;
  9002. ggml_build_forward_expand(gf, cur);
  9003. }
  9004. };
  9005. struct llm_build_rwkv6_base : public llm_graph_context {
  9006. const llama_model & model;
  9007. llm_build_rwkv6_base(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params), model(model) {
  9008. }
  9009. ggml_tensor * build_rwkv6_channel_mix(
  9010. const llama_layer * layer,
  9011. ggml_tensor * cur,
  9012. ggml_tensor * x_prev,
  9013. llm_arch arch) const {
  9014. ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur);
  9015. switch (arch) {
  9016. case LLM_ARCH_RWKV6:
  9017. {
  9018. ggml_tensor * xk = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_k), cur);
  9019. ggml_tensor * xr = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_r), cur);
  9020. ggml_tensor * r = ggml_sigmoid(ctx0, build_lora_mm(layer->channel_mix_receptance, xr));
  9021. ggml_tensor * k = ggml_sqr(
  9022. ctx0,
  9023. ggml_relu(
  9024. ctx0,
  9025. build_lora_mm(layer->channel_mix_key, xk)
  9026. )
  9027. );
  9028. cur = ggml_mul(ctx0, r, build_lora_mm(layer->channel_mix_value, k));
  9029. } break;
  9030. default:
  9031. GGML_ABORT("fatal error");
  9032. }
  9033. return cur;
  9034. }
  9035. ggml_tensor * build_rwkv6_time_mix(
  9036. ggml_cgraph * gf,
  9037. ggml_tensor * cur,
  9038. ggml_tensor * x_prev,
  9039. ggml_tensor * state_copy,
  9040. ggml_tensor * state_mask,
  9041. const llama_ubatch & ubatch,
  9042. int il) const {
  9043. const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);
  9044. const auto n_tokens = ubatch.n_tokens;
  9045. const auto n_seqs = ubatch.n_seqs;
  9046. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9047. const auto n_embd = hparams.n_embd;
  9048. const auto head_size = hparams.wkv_head_size;
  9049. const auto n_head = n_embd / head_size;
  9050. const auto n_head_kv = hparams.n_head_kv(il);
  9051. const auto kv_head = kv_self->head;
  9052. const auto & layer = model.layers[il];
  9053. bool is_qrwkv = layer.time_mix_first == nullptr;
  9054. ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur);
  9055. sx = ggml_reshape_2d(ctx0, sx, n_embd, n_tokens);
  9056. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9057. ggml_tensor * xxx = ggml_add(ctx0, ggml_mul(ctx0, sx, layer.time_mix_lerp_x), cur);
  9058. xxx = ggml_reshape_4d(
  9059. ctx0,
  9060. ggml_tanh(
  9061. ctx0,
  9062. ggml_mul_mat(ctx0, layer.time_mix_w1, xxx)
  9063. ),
  9064. layer.time_mix_w1->ne[1] / 5, 1, 5, n_tokens
  9065. );
  9066. xxx = ggml_cont(ctx0, ggml_permute(ctx0, xxx, 0, 1, 3, 2));
  9067. xxx = ggml_mul_mat(
  9068. ctx0,
  9069. ggml_reshape_4d(
  9070. ctx0,
  9071. layer.time_mix_w2,
  9072. layer.time_mix_w2->ne[0], layer.time_mix_w2->ne[1], 1, 5
  9073. ),
  9074. xxx
  9075. );
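// token-shift mixing: the low-rank pair time_mix_w1/time_mix_w2 produces five
// data-dependent interpolation offsets per token (one each for the w, k, v, r and g
// branches), which are combined with sx = x_prev - cur below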
  9076. ggml_tensor *xw, *xk, *xv, *xr, *xg;
  9077. if (layer.time_mix_lerp_fused) {
9078. // fusing these weights gives a modest performance improvement
  9079. sx = ggml_reshape_3d(ctx0, sx, n_embd, 1, n_tokens);
  9080. cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens);
  9081. xxx = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xxx, layer.time_mix_lerp_fused), sx), cur);
  9082. xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0);
  9083. xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float));
  9084. xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float));
  9085. xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float));
  9086. xg = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float));
  9087. } else {
  9088. // for backward compatibility
  9089. xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0);
  9090. xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float));
  9091. xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float));
  9092. xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float));
  9093. xg = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float));
  9094. xw = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xw, layer.time_mix_lerp_w), sx), cur);
  9095. xk = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xk, layer.time_mix_lerp_k), sx), cur);
  9096. xv = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xv, layer.time_mix_lerp_v), sx), cur);
  9097. xr = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xr, layer.time_mix_lerp_r), sx), cur);
  9098. xg = ggml_add(ctx0, ggml_mul(ctx0, ggml_add(ctx0, xg, layer.time_mix_lerp_g), sx), cur);
  9099. }
  9100. ggml_tensor * r = build_lora_mm(layer.time_mix_receptance, xr);
  9101. ggml_tensor * k = build_lora_mm(layer.time_mix_key, xk);
  9102. ggml_tensor * v = build_lora_mm(layer.time_mix_value, xv);
  9103. if (layer.time_mix_receptance_b) {
  9104. r = ggml_add(ctx0, r, layer.time_mix_receptance_b);
  9105. }
  9106. if (layer.time_mix_key_b) {
  9107. k = ggml_add(ctx0, k, layer.time_mix_key_b);
  9108. }
  9109. if (layer.time_mix_value_b) {
  9110. v = ggml_add(ctx0, v, layer.time_mix_value_b);
  9111. }
  9112. ggml_tensor * g = build_lora_mm(layer.time_mix_gate, xg);
  9113. if (is_qrwkv) {
  9114. g = ggml_sigmoid(ctx0, g);
  9115. } else {
  9116. g = ggml_silu(ctx0, g);
  9117. }
  9118. if (n_head_kv != 0 && n_head_kv != n_head) {
  9119. GGML_ASSERT(n_head % n_head_kv == 0);
  9120. k = ggml_reshape_4d(ctx0, k, head_size, 1, n_head_kv, n_tokens);
  9121. v = ggml_reshape_4d(ctx0, v, head_size, 1, n_head_kv, n_tokens);
  9122. ggml_tensor * tmp = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, head_size, n_head / n_head_kv, n_head_kv, n_tokens);
  9123. k = ggml_repeat(ctx0, k, tmp);
  9124. v = ggml_repeat(ctx0, v, tmp);
  9125. }
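// some hybrid checkpoints (e.g. QRWKV) use fewer K/V heads than R heads; in that case
// k and v are broadcast-repeated up to n_head before entering the WKV kernel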
  9126. k = ggml_reshape_3d(ctx0, k, head_size, n_head, n_tokens);
  9127. v = ggml_reshape_3d(ctx0, v, head_size, n_head, n_tokens);
  9128. r = ggml_reshape_3d(ctx0, r, head_size, n_head, n_tokens);
  9129. ggml_tensor * w = ggml_mul_mat(
  9130. ctx0,
  9131. layer.time_mix_decay_w2,
  9132. ggml_tanh(
  9133. ctx0,
  9134. ggml_mul_mat(ctx0, layer.time_mix_decay_w1, xw)
  9135. )
  9136. );
  9137. w = ggml_add(ctx0, w, layer.time_mix_decay);
  9138. w = ggml_exp(ctx0, ggml_neg(ctx0, ggml_exp(ctx0, w)));
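// decay parameterization: w = exp(-exp(w)) maps the unconstrained decay logits into
// (0, 1), so the per-channel WKV state always decays instead of growing without bound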
  9139. w = ggml_reshape_3d(ctx0, w, head_size, n_head, n_tokens);
  9140. if (is_qrwkv) {
  9141. // k = k * (1 - w)
  9142. k = ggml_sub(ctx0, k, ggml_mul(ctx0, k, w));
  9143. }
  9144. ggml_tensor * wkv_state = build_copy_mask_state(
  9145. gf, kv_self->v_l[il], state_copy, state_mask,
  9146. hparams.n_embd_v_s(), n_seqs);
  9147. ggml_tensor * wkv_output;
  9148. if (is_qrwkv) {
  9149. wkv_output = ggml_gated_linear_attn(ctx0, k, v, r, w, wkv_state, pow(head_size, -0.5f));
  9150. } else {
  9151. wkv_output = ggml_rwkv_wkv6(ctx0, k, v, r, layer.time_mix_first, w, wkv_state);
  9152. }
  9153. cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0);
  9154. wkv_state = ggml_view_1d(ctx0, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float));
  9155. ggml_build_forward_expand(
  9156. gf,
  9157. ggml_cpy(
  9158. ctx0,
  9159. wkv_state,
  9160. ggml_view_1d(
  9161. ctx0,
  9162. kv_self->v_l[il],
  9163. hparams.n_embd_v_s() * n_seqs,
  9164. hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self->v_l[il])
  9165. )
  9166. )
  9167. );
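// the tail of wkv_output holds the updated recurrent state; the ggml_cpy above writes it
// back into this layer's recurrent-state slot (kv_self->v_l[il]) at kv_head so the next
// ubatch resumes from it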
  9168. if (!is_qrwkv) {
  9169. // group norm with head_count groups
  9170. cur = ggml_reshape_3d(ctx0, cur, n_embd / n_head, n_head, n_tokens);
  9171. cur = ggml_norm(ctx0, cur, 64e-5f);
  9172. // Convert back to regular vectors.
  9173. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9174. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, layer.time_mix_ln), layer.time_mix_ln_b);
  9175. } else {
  9176. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9177. }
  9178. cur = ggml_mul(ctx0, cur, g);
  9179. cur = build_lora_mm(layer.time_mix_output, cur);
  9180. return ggml_reshape_3d(ctx0, cur, n_embd, n_seq_tokens, n_seqs);
  9181. }
  9182. };
  9183. struct llm_build_rwkv6 : public llm_build_rwkv6_base {
  9184. llm_build_rwkv6(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) {
  9185. GGML_ASSERT(hparams.token_shift_count == 2);
  9186. ggml_tensor * cur;
  9187. ggml_tensor * inpL;
  9188. inpL = build_inp_embd(model.tok_embd);
  9189. inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
  9190. ggml_tensor * state_copy = build_inp_s_copy();
  9191. ggml_tensor * state_mask = build_inp_s_mask();
  9192. const auto n_embd = hparams.n_embd;
  9193. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9194. const auto n_seqs = ubatch.n_seqs;
  9195. for (int il = 0; il < n_layer; ++il) {
  9196. const llama_layer * layer = &model.layers[il];
  9197. inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
  9198. ggml_tensor * token_shift = build_rwkv_token_shift_load(
  9199. gf, state_copy, state_mask, ubatch, il
  9200. );
  9201. ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
  9202. ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
  9203. ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM, il);
  9204. cb(att_norm, "attn_norm", il);
  9205. ggml_tensor * x_prev = ggml_concat(
  9206. ctx0,
  9207. att_shift,
  9208. ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0),
  9209. 1
  9210. );
  9211. cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, state_mask, ubatch, il);
  9212. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9213. cb(ffn_inp, "ffn_inp", il);
  9214. ggml_tensor * ffn_norm = build_norm(ffn_inp, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, il);
  9215. cb(ffn_norm, "ffn_norm", il);
  9216. x_prev = ggml_concat(
  9217. ctx0,
  9218. ffn_shift,
  9219. ggml_view_3d(ctx0, ffn_norm, n_embd, n_seq_tokens - 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], 0),
  9220. 1
  9221. );
  9222. token_shift = ggml_concat(ctx0,
  9223. ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)),
  9224. ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(ffn_norm)),
  9225. 1
  9226. );
  9227. ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
  9228. if (il == n_layer - 1) {
  9229. // skip computing output for unused tokens
  9230. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9231. ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
  9232. ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids);
  9233. x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids);
  9234. cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
  9235. }
  9236. cur = build_rwkv6_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV6);
  9237. cur = ggml_add(ctx0, cur, ffn_inp);
  9238. if (hparams.rescale_every_n_layers != 0 && (il + 1) % hparams.rescale_every_n_layers == 0) {
  9239. cur = ggml_scale(ctx0, cur, 0.5F);
  9240. }
  9241. cur = build_cvec(cur, il);
  9242. cb(cur, "l_out", il);
  9243. // input for next layer
  9244. inpL = cur;
  9245. }
  9246. cur = inpL;
  9247. cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1);
  9248. cb(cur, "result_norm", -1);
  9249. res->t_embd = cur;
  9250. cur = build_lora_mm(model.output, cur);
  9251. cb(cur, "result_output", -1);
  9252. res->t_logits = cur;
  9253. ggml_build_forward_expand(gf, cur);
  9254. }
  9255. };
  9256. // ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py
  9257. struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base {
  9258. llm_build_rwkv6qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) {
  9259. GGML_ASSERT(n_embd == hparams.n_embd_k_s());
  9260. ggml_tensor * cur;
  9261. ggml_tensor * inpL;
  9262. inpL = build_inp_embd(model.tok_embd);
  9263. ggml_tensor * state_copy = build_inp_s_copy();
  9264. ggml_tensor * state_mask = build_inp_s_mask();
  9265. const auto n_embd = hparams.n_embd;
  9266. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9267. const auto n_seqs = ubatch.n_seqs;
  9268. for (int il = 0; il < n_layer; ++il) {
  9269. const llama_layer * layer = &model.layers[il];
  9270. inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
  9271. ggml_tensor * token_shift = build_rwkv_token_shift_load(
  9272. gf, state_copy, state_mask, ubatch, il
  9273. );
  9274. ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il);
  9275. cb(att_norm, "attn_norm", il);
  9276. ggml_tensor * x_prev = ggml_concat(
  9277. ctx0,
  9278. token_shift,
  9279. ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0),
  9280. 1
  9281. );
  9282. cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, state_mask, ubatch, il);
  9283. token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm));
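// only the last position of att_norm per sequence is stored back as token-shift state;
// this hybrid keeps RWKV time mixing but uses a standard gated-SiLU FFN below instead of
// RWKV channel mixing, so no separate FFN shift is kept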
  9284. ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
  9285. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9286. cb(ffn_inp, "ffn_inp", il);
  9287. if (il == n_layer - 1) {
  9288. // skip computing output for unused tokens
  9289. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9290. cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
  9291. ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
  9292. }
  9293. // feed-forward network
  9294. cur = build_norm(ffn_inp,
  9295. model.layers[il].ffn_norm, NULL,
  9296. LLM_NORM_RMS, il);
  9297. cb(cur, "ffn_norm", il);
  9298. cur = build_ffn(cur,
  9299. model.layers[il].ffn_up, NULL, NULL,
  9300. model.layers[il].ffn_gate, NULL, NULL,
  9301. model.layers[il].ffn_down, NULL, NULL,
  9302. NULL,
  9303. LLM_FFN_SILU, LLM_FFN_PAR, il);
  9304. cb(cur, "ffn_out", il);
  9305. cur = ggml_add(ctx0, cur, ffn_inp);
  9306. cur = build_cvec(cur, il);
  9307. cb(cur, "l_out", il);
  9308. // input for next layer
  9309. inpL = cur;
  9310. }
  9311. cur = inpL;
  9312. cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM_RMS, -1);
  9313. cb(cur, "result_norm", -1);
  9314. res->t_embd = cur;
  9315. cur = build_lora_mm(model.output, cur);
  9316. cb(cur, "result_output", -1);
  9317. res->t_logits = cur;
  9318. ggml_build_forward_expand(gf, cur);
  9319. }
  9320. };
  9321. struct llm_build_rwkv7_base : public llm_graph_context {
  9322. const llama_model & model;
  9323. llm_build_rwkv7_base(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params), model(model) {
  9324. }
  9325. ggml_tensor * build_rwkv7_channel_mix(
  9326. const llama_layer * layer,
  9327. ggml_tensor * cur,
  9328. ggml_tensor * x_prev,
  9329. llm_arch arch) const {
  9330. ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur);
  9331. switch (arch) {
  9332. case LLM_ARCH_RWKV7:
  9333. {
  9334. ggml_tensor * xk = ggml_add(ctx0, ggml_mul(ctx0, sx, layer->channel_mix_lerp_k), cur);
  9335. ggml_tensor * k = ggml_sqr(
  9336. ctx0,
  9337. ggml_relu(
  9338. ctx0,
  9339. build_lora_mm(layer->channel_mix_key, xk)
  9340. )
  9341. );
  9342. cur = build_lora_mm(layer->channel_mix_value, k);
  9343. } break;
  9344. default:
  9345. GGML_ABORT("fatal error");
  9346. }
  9347. return cur;
  9348. }
  9349. ggml_tensor * build_rwkv7_time_mix(
  9350. ggml_cgraph * gf,
  9351. ggml_tensor * cur,
  9352. ggml_tensor * x_prev,
  9353. ggml_tensor * state_copy,
  9354. ggml_tensor * state_mask,
  9355. ggml_tensor *& first_layer_value,
  9356. const llama_ubatch & ubatch,
  9357. int il) const {
  9358. const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);
  9359. const auto n_tokens = ubatch.n_tokens;
  9360. const auto n_seqs = ubatch.n_seqs;
  9361. const auto n_embd = hparams.n_embd;
  9362. const auto head_size = hparams.wkv_head_size;
  9363. const auto head_count = n_embd / head_size;
  9364. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9365. const auto kv_head = kv_self->head;
  9366. const auto & layer = model.layers[il];
  9367. bool has_gating = layer.time_mix_g1 && layer.time_mix_g2;
  9368. ggml_tensor * sx = ggml_sub(ctx0, x_prev, cur);
  9369. ggml_tensor * dummy = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_embd, n_seq_tokens, n_seqs, has_gating ? 6 : 5);
  9370. sx = ggml_repeat(ctx0, sx, dummy);
  9371. ggml_tensor * xxx = ggml_add(ctx0, ggml_mul(ctx0, sx, layer.time_mix_lerp_fused), cur);
  9372. ggml_tensor * xr = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], 0);
  9373. ggml_tensor * xw = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float));
  9374. ggml_tensor * xk = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float));
  9375. ggml_tensor * xv = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float));
  9376. ggml_tensor * xa = ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float));
  9377. ggml_tensor * xg = has_gating ? ggml_view_2d(ctx0, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 5 * sizeof(float)) : nullptr;
  9378. ggml_tensor * r = build_lora_mm(layer.time_mix_receptance, xr);
  9379. ggml_tensor * w = ggml_add(
  9380. ctx0,
  9381. ggml_mul_mat(ctx0, layer.time_mix_w2, ggml_tanh(ctx0, ggml_mul_mat(ctx0, layer.time_mix_w1, xw))),
  9382. layer.time_mix_w0
  9383. );
  9384. w = ggml_exp(ctx0, ggml_scale(ctx0, ggml_sigmoid(ctx0, w), -0.606531));
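// 0.606531 ~= exp(-0.5), so w = exp(-exp(-0.5) * sigmoid(...)) keeps the per-channel
// decay in roughly (0.545, 1), presumably mirroring the upstream RWKV-7 parameterization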
  9385. ggml_tensor * k = build_lora_mm(layer.time_mix_key, xk);
  9386. ggml_tensor * v = build_lora_mm(layer.time_mix_value, xv);
  9387. if (first_layer_value == nullptr) {
  9388. first_layer_value = v;
  9389. } else {
  9390. // Add the first layer value as a residual connection.
  9391. v = ggml_add(ctx0, v,
  9392. ggml_mul(ctx0,
  9393. ggml_sub(ctx0, first_layer_value, v),
  9394. ggml_sigmoid(ctx0, ggml_add(ctx0,
  9395. ggml_mul_mat(ctx0, layer.time_mix_v2, ggml_mul_mat(ctx0, layer.time_mix_v1, xv)),
  9396. layer.time_mix_v0
  9397. )
  9398. )
  9399. )
  9400. );
  9401. }
  9402. ggml_tensor * g = nullptr;
  9403. if (layer.time_mix_g1 && layer.time_mix_g2) {
  9404. g = ggml_mul_mat(ctx0, layer.time_mix_g2, ggml_sigmoid(ctx0, ggml_mul_mat(ctx0, layer.time_mix_g1, xg)));
  9405. }
  9406. ggml_tensor * a = ggml_sigmoid(ctx0,
  9407. ggml_add(
  9408. ctx0,
  9409. ggml_mul_mat(ctx0, layer.time_mix_a2, ggml_mul_mat(ctx0, layer.time_mix_a1, xa)),
  9410. layer.time_mix_a0
  9411. )
  9412. );
  9413. ggml_tensor * kk = ggml_reshape_3d(ctx0, ggml_mul(ctx0, k, layer.time_mix_k_k), head_size, head_count, n_tokens);
  9414. kk = ggml_l2_norm(ctx0, kk, 1e-12);
  9415. ggml_tensor * ka = ggml_mul(ctx0, k, layer.time_mix_k_a);
  9416. k = ggml_add(ctx0, k, ggml_sub(ctx0, ggml_mul(ctx0, a, ka), ka));
  9417. r = ggml_reshape_3d(ctx0, r, head_size, head_count, n_tokens);
  9418. w = ggml_reshape_3d(ctx0, w, head_size, head_count, n_tokens);
  9419. k = ggml_reshape_3d(ctx0, k, head_size, head_count, n_tokens);
  9420. v = ggml_reshape_3d(ctx0, v, head_size, head_count, n_tokens);
  9421. a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens);
  9422. ggml_tensor * wkv_state = build_copy_mask_state(
  9423. gf, kv_self->v_l[il], state_copy, state_mask,
  9424. hparams.n_embd_v_s(), n_seqs);
  9425. ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state);
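// ggml_rwkv_wkv7 receives the negated L2-normalized key direction (-kk) and its gated
// copy (kk * a, with a a sigmoid-bounded data-dependent rate) alongside r/w/k/v; the
// first n_embd * n_tokens values of wkv_output are the per-token outputs and the
// remainder is the updated recurrent state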
  9426. cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0);
  9427. wkv_state = ggml_view_1d(ctx0, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float));
  9428. ggml_build_forward_expand(
  9429. gf,
  9430. ggml_cpy(
  9431. ctx0,
  9432. wkv_state,
  9433. ggml_view_1d(
  9434. ctx0,
  9435. kv_self->v_l[il],
  9436. hparams.n_embd_v_s() * n_seqs,
  9437. hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self->v_l[il])
  9438. )
  9439. )
  9440. );
  9441. if (layer.time_mix_ln && layer.time_mix_ln_b) {
  9442. // group norm with head_count groups
  9443. cur = ggml_reshape_3d(ctx0, cur, n_embd / head_count, head_count, n_tokens);
  9444. cur = ggml_norm(ctx0, cur, 64e-5f);
  9445. // Convert back to regular vectors.
  9446. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9447. cur = ggml_add(ctx0, ggml_mul(ctx0, cur, layer.time_mix_ln), layer.time_mix_ln_b);
  9448. } else {
  9449. cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
  9450. }
  9451. ggml_tensor * rk = ggml_sum_rows(ctx0,
  9452. ggml_mul(ctx0, ggml_mul(ctx0, k, r), ggml_reshape_2d(ctx0, layer.time_mix_r_k, head_size, head_count)));
  9453. cur = ggml_add(ctx0, cur, ggml_reshape_2d(ctx0, ggml_mul(ctx0, v, rk), n_embd, n_tokens));
  9454. if (has_gating) {
  9455. cur = ggml_mul(ctx0, cur, g);
  9456. }
  9457. cur = build_lora_mm(layer.time_mix_output, cur);
  9458. return ggml_reshape_3d(ctx0, cur, n_embd, n_seq_tokens, n_seqs);
  9459. }
  9460. };
  9461. struct llm_build_rwkv7 : public llm_build_rwkv7_base {
  9462. llm_build_rwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) {
  9463. GGML_ASSERT(hparams.token_shift_count == 2);
  9464. ggml_tensor * cur;
  9465. ggml_tensor * inpL;
  9466. ggml_tensor * v_first = nullptr;
  9467. inpL = build_inp_embd(model.tok_embd);
  9468. inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);
  9469. ggml_tensor * state_copy = build_inp_s_copy();
  9470. ggml_tensor * state_mask = build_inp_s_mask();
  9471. const auto n_embd = hparams.n_embd;
  9472. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9473. const auto n_seqs = ubatch.n_seqs;
  9474. for (int il = 0; il < n_layer; ++il) {
  9475. const llama_layer * layer = &model.layers[il];
  9476. inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
  9477. ggml_tensor * token_shift = build_rwkv_token_shift_load(
  9478. gf, state_copy, state_mask, ubatch, il
  9479. );
  9480. ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
  9481. ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
  9482. ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM, il);
  9483. cb(att_norm, "attn_norm", il);
  9484. ggml_tensor * x_prev = ggml_concat(
  9485. ctx0,
  9486. att_shift,
  9487. ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0),
  9488. 1
  9489. );
  9490. cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, state_mask, v_first, ubatch, il);
  9491. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9492. cb(ffn_inp, "ffn_inp", il);
  9493. ggml_tensor * ffn_norm = build_norm(ffn_inp, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, il);
  9494. cb(ffn_norm, "ffn_norm", il);
  9495. x_prev = ggml_concat(
  9496. ctx0,
  9497. ffn_shift,
  9498. ggml_view_3d(ctx0, ffn_norm, n_embd, n_seq_tokens - 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], 0),
  9499. 1
  9500. );
  9501. token_shift = ggml_concat(ctx0,
  9502. ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)),
  9503. ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(ffn_norm)),
  9504. 1
  9505. );
  9506. ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
  9507. if (il == n_layer - 1) {
  9508. // skip computing output for unused tokens
  9509. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9510. ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
  9511. ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids);
  9512. x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids);
  9513. }
  9514. cur = build_rwkv7_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV7);
  9515. cur = ggml_add(ctx0, cur, ffn_inp);
  9516. cur = build_cvec(cur, il);
  9517. cb(cur, "l_out", il);
  9518. // input for next layer
  9519. inpL = cur;
  9520. }
  9521. cur = inpL;
  9522. cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1);
  9523. cb(cur, "result_norm", -1);
  9524. res->t_embd = cur;
  9525. cur = build_lora_mm(model.output, cur);
  9526. cb(cur, "result_output", -1);
  9527. res->t_logits = cur;
  9528. ggml_build_forward_expand(gf, cur);
  9529. }
  9530. };
  9531. struct llm_build_arwkv7 : public llm_build_rwkv7_base {
  9532. llm_build_arwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) {
  9533. GGML_ASSERT(n_embd == hparams.n_embd_k_s());
  9534. ggml_tensor * cur;
  9535. ggml_tensor * inpL;
  9536. ggml_tensor * v_first = nullptr;
  9537. inpL = build_inp_embd(model.tok_embd);
  9538. ggml_tensor * state_copy = build_inp_s_copy();
  9539. ggml_tensor * state_mask = build_inp_s_mask();
  9540. const auto n_embd = hparams.n_embd;
  9541. const auto n_seq_tokens = ubatch.n_seq_tokens;
  9542. const auto n_seqs = ubatch.n_seqs;
  9543. for (int il = 0; il < n_layer; ++il) {
  9544. const llama_layer * layer = &model.layers[il];
  9545. inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
  9546. ggml_tensor * token_shift = build_rwkv_token_shift_load(
  9547. gf, state_copy, state_mask, ubatch, il
  9548. );
  9549. ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il);
  9550. cb(att_norm, "attn_norm", il);
  9551. ggml_tensor * x_prev = ggml_concat(
  9552. ctx0,
  9553. token_shift,
  9554. ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0),
  9555. 1
  9556. );
  9557. cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, state_mask, v_first, ubatch, il);
  9558. token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm));
  9559. ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));
  9560. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9561. cb(ffn_inp, "ffn_inp", il);
  9562. if (il == n_layer - 1) {
  9563. // skip computing output for unused tokens
  9564. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9565. cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids);
  9566. ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids);
  9567. }
  9568. // feed-forward network
  9569. cur = build_norm(ffn_inp,
  9570. model.layers[il].ffn_norm, NULL,
  9571. LLM_NORM_RMS, il);
  9572. cb(cur, "ffn_norm", il);
  9573. cur = build_ffn(cur,
  9574. model.layers[il].ffn_up, NULL, NULL,
  9575. model.layers[il].ffn_gate, NULL, NULL,
  9576. model.layers[il].ffn_down, NULL, NULL,
  9577. NULL,
  9578. LLM_FFN_SILU, LLM_FFN_PAR, il);
  9579. cb(cur, "ffn_out", il);
  9580. cur = ggml_add(ctx0, cur, ffn_inp);
  9581. cur = build_cvec(cur, il);
  9582. cb(cur, "l_out", il);
  9583. // input for next layer
  9584. inpL = cur;
  9585. }
  9586. cur = inpL;
  9587. cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM_RMS, -1);
  9588. cb(cur, "result_norm", -1);
  9589. res->t_embd = cur;
  9590. cur = build_lora_mm(model.output, cur);
  9591. cb(cur, "result_output", -1);
  9592. res->t_logits = cur;
  9593. ggml_build_forward_expand(gf, cur);
  9594. }
  9595. };
  9596. // ref: https://github.com/facebookresearch/chameleon
  9597. // based on the original build_llama() function, changes:
  9598. // * qk-norm
  9599. // * swin-norm
  9600. // * removed bias
  9601. // * removed MoE
  9602. struct llm_build_chameleon : public llm_graph_context {
  9603. llm_build_chameleon(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  9604. const int64_t n_embd_head = hparams.n_embd_head_v;
  9605. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  9606. GGML_ASSERT(n_embd_head == hparams.n_rot);
  9607. ggml_tensor * cur;
  9608. ggml_tensor * inpL;
  9609. inpL = build_inp_embd(model.tok_embd);
  9610. // inp_pos - contains the positions
  9611. ggml_tensor * inp_pos = build_inp_pos();
  9612. auto * inp_attn = build_attn_inp_kv_unified();
  9613. for (int il = 0; il < n_layer; ++il) {
  9614. ggml_tensor * inpSA = inpL;
  9615. // norm
  9616. if (hparams.swin_norm) {
  9617. cur = inpL;
  9618. } else {
  9619. cur = build_norm(inpL,
  9620. model.layers[il].attn_norm, NULL,
  9621. LLM_NORM_RMS, il);
  9622. cb(cur, "attn_norm", il);
  9623. }
  9624. // self-attention
  9625. {
  9626. // compute Q and K and RoPE them
  9627. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  9628. cb(Qcur, "Qcur", il);
  9629. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  9630. cb(Kcur, "Kcur", il);
  9631. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  9632. cb(Vcur, "Vcur", il);
  9633. if (model.layers[il].attn_q_norm) {
  9634. Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens,
  9635. ggml_element_size(Qcur) * n_embd_head,
  9636. ggml_element_size(Qcur) * n_embd_head * n_head,
  9637. 0);
  9638. cb(Qcur, "Qcur", il);
  9639. Qcur = build_norm(Qcur,
  9640. model.layers[il].attn_q_norm,
  9641. model.layers[il].attn_q_norm_b,
  9642. LLM_NORM, il);
  9643. cb(Qcur, "Qcur", il);
  9644. }
  9645. if (model.layers[il].attn_k_norm) {
  9646. Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens,
  9647. ggml_element_size(Kcur) * n_embd_head,
  9648. ggml_element_size(Kcur) * n_embd_head * n_head_kv,
  9649. 0);
  9650. cb(Kcur, "Kcur", il);
  9651. Kcur = build_norm(Kcur,
  9652. model.layers[il].attn_k_norm,
  9653. model.layers[il].attn_k_norm_b,
  9654. LLM_NORM, il);
  9655. cb(Kcur, "Kcur", il);
  9656. }
  9657. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  9658. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  9659. Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
  9660. Qcur = ggml_rope_ext(
  9661. ctx0, Qcur, inp_pos, nullptr,
  9662. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9663. ext_factor, attn_factor, beta_fast, beta_slow
  9664. );
  9665. Kcur = ggml_rope_ext(
  9666. ctx0, Kcur, inp_pos, nullptr,
  9667. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9668. ext_factor, attn_factor, beta_fast, beta_slow
  9669. );
  9670. cb(Qcur, "Qcur", il);
  9671. cb(Kcur, "Kcur", il);
  9672. cb(Vcur, "Vcur", il);
  9673. cur = build_attn(inp_attn, gf,
  9674. model.layers[il].wo, nullptr,
  9675. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
  9676. if (hparams.swin_norm) {
  9677. cur = build_norm(cur,
  9678. model.layers[il].attn_norm, NULL,
  9679. LLM_NORM_RMS, il);
  9680. }
  9681. }
  9682. if (il == n_layer - 1) {
  9683. // skip computing output for unused tokens
  9684. ggml_tensor * inp_out_ids = build_inp_out_ids();
  9685. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  9686. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  9687. }
  9688. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  9689. cb(ffn_inp, "ffn_inp", il);
  9690. // feed-forward network
  9691. if (!hparams.swin_norm) {
  9692. cur = build_norm(ffn_inp,
  9693. model.layers[il].ffn_norm, NULL,
  9694. LLM_NORM_RMS, il);
  9695. cb(cur, "ffn_norm", il);
  9696. }
  9697. cur = build_ffn(cur,
  9698. model.layers[il].ffn_up, NULL, NULL,
  9699. model.layers[il].ffn_gate, NULL, NULL,
  9700. model.layers[il].ffn_down, NULL, NULL,
  9701. NULL,
  9702. LLM_FFN_SILU, LLM_FFN_PAR, il);
  9703. cb(cur, "ffn_out", il);
  9704. if (hparams.swin_norm) {
  9705. cur = build_norm(cur,
  9706. model.layers[il].ffn_norm, NULL,
  9707. LLM_NORM_RMS, il);
  9708. cb(cur, "ffn_norm", il);
  9709. }
  9710. cur = ggml_add(ctx0, cur, ffn_inp);
  9711. cb(cur, "ffn_out", il);
  9712. cur = build_cvec(cur, il);
  9713. cb(cur, "l_out", il);
  9714. // input for next layer
  9715. inpL = cur;
  9716. }
  9717. cur = inpL;
  9718. cur = build_norm(cur,
  9719. model.output_norm, NULL,
  9720. LLM_NORM_RMS, -1);
  9721. cb(cur, "result_norm", -1);
  9722. res->t_embd = cur;
  9723. // lm_head
  9724. cur = build_lora_mm(model.output, cur);
  9725. cb(cur, "result_output_with_img_logits", -1);
  9726. // TODO: this suppresses the output of image tokens, which is required to enable text-only outputs.
  9727. // Needs to be removed once image outputs are supported.
  9728. int img_token_end_idx = 8196;
  9729. int img_token_start_idx = 4;
  9730. int num_img_tokens = img_token_end_idx - img_token_start_idx;
  9731. // creates 1d tensor of size num_img_tokens and values -FLT_MAX,
  9732. // which ensures that text token values are always at least larger than image token values
  9733. ggml_tensor * img_logits = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, num_img_tokens);
  9734. img_logits = ggml_clamp(ctx0, img_logits, -FLT_MAX, -FLT_MAX);
  9735. cb(img_logits, "img_logits", -1);
  9736. cur = ggml_set_1d(ctx0, cur, img_logits, ggml_element_size(cur) * img_token_start_idx);
  9737. cb(cur, "result_output", -1);
  9738. res->t_logits = cur;
  9739. ggml_build_forward_expand(gf, cur);
  9740. }
  9741. };
  9742. struct llm_build_wavtokenizer_dec : public llm_graph_context {
  9743. llm_build_wavtokenizer_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  9744. ggml_tensor * cur;
  9745. ggml_tensor * inpL;
  9746. inpL = build_inp_embd(model.tok_embd);
  9747. cur = ggml_cont(ctx0, ggml_transpose(ctx0, inpL));
  9748. cur = ggml_conv_1d_ph(ctx0, model.conv1d, cur, 1, 1);
  9749. cur = ggml_add(ctx0, cur, model.conv1d_b);
  9750. // posnet
  9751. for (uint32_t il = 0; il < hparams.posnet.n_layer; ++il) {
  9752. const auto & layer = model.layers[il].posnet;
  9753. inpL = cur;
  9754. switch (il) {
  9755. case 0:
  9756. case 1:
  9757. case 3:
  9758. case 4:
  9759. {
  9760. cur = build_norm(cur,
  9761. layer.norm1,
  9762. layer.norm1_b,
  9763. LLM_NORM_GROUP, 0);
  9764. cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur);
  9765. cur = ggml_conv_1d_ph(ctx0, layer.conv1, cur, 1, 1);
  9766. cur = ggml_add(ctx0, cur, layer.conv1_b);
  9767. cur = build_norm(cur,
  9768. layer.norm2,
  9769. layer.norm2_b,
  9770. LLM_NORM_GROUP, 0);
  9771. cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur);
  9772. cur = ggml_conv_1d_ph(ctx0, layer.conv2, cur, 1, 1);
  9773. cur = ggml_add(ctx0, cur, layer.conv2_b);
  9774. cur = ggml_add(ctx0, cur, inpL);
  9775. } break;
  9776. case 2:
  9777. {
  9778. cur = build_norm(cur,
  9779. layer.attn_norm,
  9780. layer.attn_norm_b,
  9781. LLM_NORM_GROUP, 0);
  9782. ggml_tensor * q;
  9783. ggml_tensor * k;
  9784. ggml_tensor * v;
  9785. q = ggml_conv_1d_ph(ctx0, layer.attn_q, cur, 1, 1);
  9786. k = ggml_conv_1d_ph(ctx0, layer.attn_k, cur, 1, 1);
  9787. v = ggml_conv_1d_ph(ctx0, layer.attn_v, cur, 1, 1);
  9788. q = ggml_add(ctx0, q, layer.attn_q_b);
  9789. k = ggml_add(ctx0, k, layer.attn_k_b);
  9790. v = ggml_add(ctx0, v, layer.attn_v_b);
  9791. q = ggml_cont(ctx0, ggml_transpose(ctx0, q));
  9792. k = ggml_cont(ctx0, ggml_transpose(ctx0, k));
  9793. ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
  9794. kq = ggml_soft_max_ext(ctx0, kq, nullptr, 1.0f/sqrtf(float(hparams.posnet.n_embd)), 0.0f);
  9795. cur = ggml_mul_mat(ctx0, kq, v);
  9796. cur = ggml_conv_1d_ph(ctx0, layer.attn_o, cur, 1, 1);
  9797. cur = ggml_add(ctx0, cur, layer.attn_o_b);
  9798. cur = ggml_add(ctx0, cur, inpL);
  9799. } break;
  9800. case 5:
  9801. {
  9802. cur = build_norm(cur,
  9803. layer.norm,
  9804. layer.norm_b,
  9805. LLM_NORM_GROUP, 0);
  9806. } break;
  9807. default: GGML_ABORT("unknown posnet layer");
  9808. };
  9809. }
  9810. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  9811. cur = build_norm(cur,
  9812. model.tok_norm,
  9813. model.tok_norm_b,
  9814. LLM_NORM, -1);
  9815. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  9816. inpL = cur;
  9817. // convnext
  9818. for (uint32_t il = 0; il < hparams.convnext.n_layer; ++il) {
  9819. const auto & layer = model.layers[il].convnext;
  9820. cur = inpL;
  9821. cur = ggml_conv_1d_dw_ph(ctx0, layer.dw, cur, 1, 1);
  9822. cur = ggml_add(ctx0, cur, layer.dw_b);
  9823. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  9824. cur = build_norm(cur,
  9825. layer.norm,
  9826. layer.norm_b,
  9827. LLM_NORM, -1);
  9828. cur = build_ffn(cur,
  9829. layer.pw1, layer.pw1_b, NULL,
  9830. NULL, NULL, NULL,
  9831. layer.pw2, layer.pw2_b, NULL,
  9832. NULL,
  9833. LLM_FFN_GELU, LLM_FFN_SEQ, il);
  9834. cur = ggml_mul(ctx0, cur, layer.gamma);
  9835. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  9836. inpL = ggml_add(ctx0, cur, inpL);
  9837. }
  9838. cur = inpL;
  9839. cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
  9840. cur = build_norm(cur,
  9841. model.output_norm,
  9842. model.output_norm_b,
  9843. LLM_NORM, -1);
  9844. // lm_head
  9845. cur = build_lora_mm(model.output, cur);
  9846. cur = ggml_add(ctx0, cur, model.output_b);
  9847. cb(cur, "result_embd", -1);
  9848. res->t_embd = cur;
  9849. ggml_build_forward_expand(gf, cur);
  9850. }
  9851. };
  9852. struct llm_build_plm : public llm_graph_context {
  9853. llm_build_plm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  9854. const float kq_scale = 1.0f/sqrtf(float(hparams.n_embd_head_k));
  9855. const uint32_t n_embd_head_qk_rope = hparams.n_rot;
  9856. const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  9857. const uint32_t kv_lora_rank = hparams.n_lora_kv;
  9858. ggml_tensor * cur;
  9859. ggml_tensor * inpL;
  9860. // {n_embd, n_tokens}
  9861. inpL = build_inp_embd(model.tok_embd);
  9862. // inp_pos - contains the positions
  9863. ggml_tensor * inp_pos = build_inp_pos();
  9864. auto * inp_attn = build_attn_inp_kv_unified();
  9865. for (int il = 0; il < n_layer; ++il) {
  9866. ggml_tensor * inpSA = inpL;
  9867. // norm
  9868. cur = build_norm(inpL,
  9869. model.layers[il].attn_norm, NULL,
  9870. LLM_NORM_RMS, il);
  9871. cb(cur, "attn_norm", il);
  9872. // self_attention
  9873. {
  9874. ggml_tensor * q = NULL;
  9875. q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  9876. cb(q, "q", il);
  9877. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  9878. ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
  9879. ggml_row_size(q->type, hparams.n_embd_head_k),
  9880. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  9881. 0);
  9882. cb(q_nope, "q_nope", il);
  9883. // and {n_head * n_embd_head_qk_rope, n_tokens}
  9884. ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
  9885. ggml_row_size(q->type, hparams.n_embd_head_k),
  9886. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  9887. ggml_row_size(q->type, n_embd_head_qk_nope));
  9888. cb(q_pe, "q_pe", il);
  9889. // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
  9890. ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
  9891. cb(kv_pe_compresseed, "kv_pe_compresseed", il);
  9892. // split into {kv_lora_rank, n_tokens}
  9893. ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens,
  9894. kv_pe_compresseed->nb[1],
  9895. 0);
  9896. cb(kv_compressed, "kv_compressed", il);
  9897. // and {n_embd_head_qk_rope, n_tokens}
  9898. ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens,
  9899. kv_pe_compresseed->nb[1],
  9900. kv_pe_compresseed->nb[1],
  9901. ggml_row_size(kv_pe_compresseed->type, kv_lora_rank));
  9902. cb(k_pe, "k_pe", il);
  9903. kv_compressed = build_norm(kv_compressed,
  9904. model.layers[il].attn_kv_a_norm, NULL,
  9905. LLM_NORM_RMS, il);
  9906. cb(kv_compressed, "kv_compressed", il);
  9907. // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
  9908. ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
  9909. cb(kv, "kv", il);
  9910. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  9911. ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
  9912. ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
  9913. ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  9914. 0);
  9915. cb(k_nope, "k_nope", il);
  9916. // and {n_head * n_embd_head_v, n_tokens}
  9917. ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
  9918. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  9919. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
  9920. ggml_row_size(kv->type, (n_embd_head_qk_nope)));
  9921. cb(v_states, "v_states", il);
  9922. v_states = ggml_cont(ctx0, v_states);
  9923. cb(v_states, "v_states", il);
  9924. v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
  9925. ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
  9926. 0);
  9927. cb(v_states, "v_states", il);
  9928. q_pe = ggml_rope_ext(
  9929. ctx0, q_pe, inp_pos, nullptr,
  9930. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9931. ext_factor, attn_factor, beta_fast, beta_slow
  9932. );
  9933. cb(q_pe, "q_pe", il);
  9934. // shared RoPE key
  9935. k_pe = ggml_rope_ext(
  9936. ctx0, k_pe, inp_pos, nullptr,
  9937. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9938. ext_factor, attn_factor, beta_fast, beta_slow
  9939. );
  9940. cb(k_pe, "k_pe", il);
  9941. ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
  9942. cb(q_states, "q_states", il);
  9943. ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
  9944. cb(k_states, "k_states", il);
  9945. cur = build_attn(inp_attn, gf,
  9946. model.layers[il].wo, NULL,
  9947. q_states, k_states, v_states, nullptr, kq_scale, il);
  9948. }
  9949. if (il == n_layer - 1) {
  9950. // skip computing output for unused tokens
  9951. ggml_tensor * inp_out_ids = build_inp_out_ids();
  9952. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  9953. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  9954. }
  9955. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  9956. cb(ffn_inp, "ffn_inp", il);
  9957. cur = build_norm(ffn_inp,
  9958. model.layers[il].ffn_norm, NULL,
  9959. LLM_NORM_RMS, il);
  9960. cb(cur, "ffn_norm", il);
  9961. cur = build_ffn(cur,
  9962. model.layers[il].ffn_up, NULL, NULL,
  9963. NULL, NULL, NULL,
  9964. model.layers[il].ffn_down, NULL, NULL,
  9965. NULL,
  9966. LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il);
  9967. cb(cur, "ffn_out", il);
  9968. cur = ggml_add(ctx0, cur, ffn_inp);
  9969. cur = build_cvec(cur, il);
  9970. cb(cur, "l_out", il);
  9971. // input for next layer
  9972. inpL = cur;
  9973. }
  9974. cur = inpL;
  9975. cur = build_norm(cur,
  9976. model.output_norm, NULL,
  9977. LLM_NORM_RMS, -1);
  9978. cb(cur, "result_norm", -1);
  9979. res->t_embd = cur;
  9980. cur = build_lora_mm(model.output, cur);
  9981. cb(cur, "result_output", -1);
  9982. res->t_logits = cur;
  9983. ggml_build_forward_expand(gf, cur);
  9984. }
  9985. };
  9986. struct llm_build_bailingmoe : public llm_graph_context {
  9987. llm_build_bailingmoe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
  9988. ggml_tensor * cur;
  9989. ggml_tensor * inpL;
  9990. inpL = build_inp_embd(model.tok_embd);
  9991. // inp_pos - contains the positions
  9992. ggml_tensor * inp_pos = build_inp_pos();
  9993. auto * inp_attn = build_attn_inp_kv_unified();
  9994. for (int il = 0; il < n_layer; ++il) {
  9995. ggml_tensor * inpSA = inpL;
  9996. // norm
  9997. cur = build_norm(inpL,
  9998. model.layers[il].attn_norm, NULL,
  9999. LLM_NORM_RMS, il);
  10000. cb(cur, "attn_norm", il);
  10001. // self-attention
  10002. {
  10003. // rope freq factors for llama3; may return nullptr for llama2 and other models
  10004. ggml_tensor * rope_factors = static_cast<const llama_kv_cache_unified *>(memory)->cbs.get_rope_factors(n_ctx_per_seq, il);
  10005. // compute Q and K and RoPE them
  10006. ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
  10007. cb(Qcur, "Qcur", il);
  10008. if (model.layers[il].bq) {
  10009. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  10010. cb(Qcur, "Qcur", il);
  10011. }
  10012. ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  10013. cb(Kcur, "Kcur", il);
  10014. if (model.layers[il].bk) {
  10015. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  10016. cb(Kcur, "Kcur", il);
  10017. }
  10018. ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  10019. cb(Vcur, "Vcur", il);
  10020. if (model.layers[il].bv) {
  10021. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  10022. cb(Vcur, "Vcur", il);
  10023. }
  10024. Qcur = ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens);
  10025. Kcur = ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens);
  10026. Vcur = ggml_reshape_3d(ctx0, Vcur, n_rot, n_head_kv, n_tokens);
  10027. Qcur = ggml_rope_ext(
  10028. ctx0, Qcur, inp_pos, rope_factors,
  10029. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10030. ext_factor, attn_factor, beta_fast, beta_slow
  10031. );
  10032. Kcur = ggml_rope_ext(
  10033. ctx0, Kcur, inp_pos, rope_factors,
  10034. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  10035. ext_factor, attn_factor, beta_fast, beta_slow
  10036. );
  10037. cb(Qcur, "Qcur", il);
  10038. cb(Kcur, "Kcur", il);
  10039. cb(Vcur, "Vcur", il);
  10040. cur = build_attn(inp_attn, gf,
  10041. model.layers[il].wo, model.layers[il].bo,
  10042. Qcur, Kcur, Vcur, nullptr, 1.0f/sqrtf(float(n_rot)), il);
  10043. }
  10044. if (il == n_layer - 1) {
  10045. // skip computing output for unused tokens
  10046. ggml_tensor * inp_out_ids = build_inp_out_ids();
  10047. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  10048. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  10049. }
  10050. ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  10051. cb(ffn_inp, "ffn_inp", il);
  10052. cur = build_norm(ffn_inp,
  10053. model.layers[il].ffn_norm, NULL,
  10054. LLM_NORM_RMS, il);
  10055. cb(cur, "ffn_norm", il);
  10056. ggml_tensor * moe_out =
  10057. build_moe_ffn(cur,
  10058. model.layers[il].ffn_gate_inp,
  10059. model.layers[il].ffn_up_exps,
  10060. model.layers[il].ffn_gate_exps,
  10061. model.layers[il].ffn_down_exps,
  10062. nullptr,
  10063. n_expert, n_expert_used,
  10064. LLM_FFN_SILU, hparams.expert_weights_norm,
  10065. false, hparams.expert_weights_scale,
  10066. LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
  10067. il);
  10068. cb(moe_out, "ffn_moe_out", il);
  10069. // FFN shared expert
  10070. {
  10071. ggml_tensor * ffn_shexp = build_ffn(cur,
  10072. model.layers[il].ffn_up_shexp, NULL, NULL,
  10073. model.layers[il].ffn_gate_shexp, NULL, NULL,
  10074. model.layers[il].ffn_down_shexp, NULL, NULL,
  10075. NULL,
  10076. LLM_FFN_SILU, LLM_FFN_PAR, il);
  10077. cb(ffn_shexp, "ffn_shexp", il);
  10078. cur = ggml_add(ctx0, moe_out, ffn_shexp);
  10079. cb(cur, "ffn_out", il);
  10080. }
  10081. cur = ggml_add(ctx0, cur, ffn_inp);
  10082. cur = build_cvec(cur, il);
  10083. cb(cur, "l_out", il);
  10084. // input for next layer
  10085. inpL = cur;
  10086. }
  10087. cur = inpL;
  10088. cur = build_norm(cur,
  10089. model.output_norm, NULL,
  10090. LLM_NORM_RMS, -1);
  10091. cb(cur, "result_norm", -1);
  10092. res->t_embd = cur;
  10093. // lm_head
  10094. cur = build_lora_mm(model.output, cur);
  10095. cb(cur, "result_output", -1);
  10096. res->t_logits = cur;
  10097. ggml_build_forward_expand(gf, cur);
  10098. }
  10099. };
  10100. llama_memory_i * llama_model::create_memory() const {
  10101. llama_memory_i * res;
  10102. switch (arch) {
  10103. case LLM_ARCH_MAMBA:
  10104. case LLM_ARCH_RWKV6:
  10105. case LLM_ARCH_RWKV6QWEN2:
  10106. case LLM_ARCH_RWKV7:
  10107. case LLM_ARCH_ARWKV7:
  10108. {
  10109. res = new llama_kv_cache_unified(hparams, {
  10110. /*.get_rope_factors =*/ nullptr
  10111. });
  10112. } break;
  10113. default:
  10114. {
  10115. res = new llama_kv_cache_unified(hparams, {
  10116. /*.get_rope_factors =*/ [this](uint32_t n_ctx_per_seq, int il) {
  10117. // choose long/short freq factors based on the context size
  10118. if (layers[il].rope_freqs != nullptr) {
  10119. return layers[il].rope_freqs;
  10120. }
  10121. if (n_ctx_per_seq > hparams.n_ctx_orig_yarn) {
  10122. return layers[il].rope_long;
  10123. }
  10124. return layers[il].rope_short;
  10125. }
  10126. });
  10127. }
  10128. }
  10129. return res;
  10130. }
  10131. llm_graph_result_ptr llama_model::build_graph(
  10132. const llm_graph_params & params,
  10133. ggml_cgraph * gf,
  10134. llm_graph_type type) const {
  10135. std::unique_ptr<llm_graph_context> llm;
  10136. switch (arch) {
  10137. case LLM_ARCH_LLAMA:
  10138. case LLM_ARCH_LLAMA4:
  10139. case LLM_ARCH_MINICPM:
  10140. case LLM_ARCH_GRANITE:
  10141. case LLM_ARCH_GRANITE_MOE:
  10142. {
  10143. llm = std::make_unique<llm_build_llama>(*this, params, gf);
  10144. } break;
  10145. case LLM_ARCH_DECI:
  10146. {
  10147. llm = std::make_unique<llm_build_deci>(*this, params, gf);
  10148. } break;
  10149. case LLM_ARCH_BAICHUAN:
  10150. {
  10151. llm = std::make_unique<llm_build_baichuan>(*this, params, gf);
  10152. } break;
  10153. case LLM_ARCH_FALCON:
  10154. {
  10155. llm = std::make_unique<llm_build_falcon>(*this, params, gf);
  10156. } break;
  10157. case LLM_ARCH_GROK:
  10158. {
  10159. llm = std::make_unique<llm_build_grok>(*this, params, gf);
  10160. } break;
  10161. case LLM_ARCH_STARCODER:
  10162. {
  10163. llm = std::make_unique<llm_build_starcoder>(*this, params, gf);
  10164. } break;
  10165. case LLM_ARCH_REFACT:
  10166. {
  10167. llm = std::make_unique<llm_build_refact>(*this, params, gf);
  10168. } break;
  10169. case LLM_ARCH_BERT:
  10170. case LLM_ARCH_JINA_BERT_V2:
  10171. case LLM_ARCH_NOMIC_BERT:
  10172. {
  10173. llm = std::make_unique<llm_build_bert>(*this, params, gf);
  10174. } break;
  10175. case LLM_ARCH_BLOOM:
  10176. {
  10177. llm = std::make_unique<llm_build_bloom>(*this, params, gf);
  10178. } break;
  10179. case LLM_ARCH_MPT:
  10180. {
  10181. llm = std::make_unique<llm_build_mpt>(*this, params, gf);
  10182. } break;
  10183. case LLM_ARCH_STABLELM:
  10184. {
  10185. llm = std::make_unique<llm_build_stablelm>(*this, params, gf);
  10186. } break;
  10187. case LLM_ARCH_QWEN:
  10188. {
  10189. llm = std::make_unique<llm_build_qwen>(*this, params, gf);
  10190. } break;
  10191. case LLM_ARCH_QWEN2:
  10192. {
  10193. llm = std::make_unique<llm_build_qwen2>(*this, params, gf);
  10194. } break;
  10195. case LLM_ARCH_QWEN2VL:
  10196. {
  10197. llm = std::make_unique<llm_build_qwen2vl>(*this, params, gf);
  10198. } break;
  10199. case LLM_ARCH_QWEN2MOE:
  10200. {
  10201. llm = std::make_unique<llm_build_qwen2moe>(*this, params, gf);
  10202. } break;
  10203. case LLM_ARCH_QWEN3:
  10204. {
  10205. llm = std::make_unique<llm_build_qwen3>(*this, params, gf);
  10206. } break;
  10207. case LLM_ARCH_QWEN3MOE:
  10208. {
  10209. llm = std::make_unique<llm_build_qwen3moe>(*this, params, gf);
  10210. } break;
  10211. case LLM_ARCH_PHI2:
  10212. {
  10213. llm = std::make_unique<llm_build_phi2>(*this, params, gf);
  10214. } break;
  10215. case LLM_ARCH_PHI3:
  10216. case LLM_ARCH_PHIMOE:
  10217. {
  10218. llm = std::make_unique<llm_build_phi3>(*this, params, gf);
  10219. } break;
  10220. case LLM_ARCH_PLAMO:
  10221. {
  10222. llm = std::make_unique<llm_build_plamo>(*this, params, gf);
  10223. } break;
  10224. case LLM_ARCH_GPT2:
  10225. {
  10226. llm = std::make_unique<llm_build_gpt2>(*this, params, gf);
  10227. } break;
  10228. case LLM_ARCH_CODESHELL:
  10229. {
  10230. llm = std::make_unique<llm_build_codeshell>(*this, params, gf);
  10231. } break;
  10232. case LLM_ARCH_ORION:
  10233. {
  10234. llm = std::make_unique<llm_build_orion>(*this, params, gf);
  10235. } break;
  10236. case LLM_ARCH_INTERNLM2:
  10237. {
  10238. llm = std::make_unique<llm_build_internlm2>(*this, params, gf);
  10239. } break;
  10240. case LLM_ARCH_MINICPM3:
  10241. {
  10242. llm = std::make_unique<llm_build_minicpm3>(*this, params, gf);
  10243. } break;
  10244. case LLM_ARCH_GEMMA:
  10245. {
  10246. llm = std::make_unique<llm_build_gemma>(*this, params, gf);
  10247. } break;
  10248. case LLM_ARCH_GEMMA2:
  10249. {
  10250. llm = std::make_unique<llm_build_gemma2>(*this, params, gf);
  10251. } break;
  10252. case LLM_ARCH_GEMMA3:
  10253. {
  10254. llm = std::make_unique<llm_build_gemma3>(*this, params, gf);
  10255. } break;
  10256. case LLM_ARCH_STARCODER2:
  10257. {
  10258. llm = std::make_unique<llm_build_starcoder2>(*this, params, gf);
  10259. } break;
  10260. case LLM_ARCH_MAMBA:
  10261. {
  10262. llm = std::make_unique<llm_build_mamba>(*this, params, gf);
  10263. } break;
  10264. case LLM_ARCH_XVERSE:
  10265. {
  10266. llm = std::make_unique<llm_build_xverse>(*this, params, gf);
  10267. } break;
  10268. case LLM_ARCH_COMMAND_R:
  10269. {
  10270. llm = std::make_unique<llm_build_command_r>(*this, params, gf);
  10271. } break;
  10272. case LLM_ARCH_COHERE2:
  10273. {
  10274. llm = std::make_unique<llm_build_cohere2>(*this, params, gf);
  10275. } break;
  10276. case LLM_ARCH_DBRX:
  10277. {
  10278. llm = std::make_unique<llm_build_dbrx>(*this, params, gf);
  10279. } break;
  10280. case LLM_ARCH_OLMO:
  10281. {
  10282. llm = std::make_unique<llm_build_olmo>(*this, params, gf);
  10283. } break;
  10284. case LLM_ARCH_OLMO2:
  10285. {
  10286. llm = std::make_unique<llm_build_olmo2>(*this, params, gf);
  10287. } break;
  10288. case LLM_ARCH_OLMOE:
  10289. {
  10290. llm = std::make_unique<llm_build_olmoe>(*this, params, gf);
  10291. } break;
  10292. case LLM_ARCH_OPENELM:
  10293. {
  10294. llm = std::make_unique<llm_build_openelm>(*this, params, gf);
  10295. } break;
  10296. case LLM_ARCH_GPTNEOX:
  10297. {
  10298. llm = std::make_unique<llm_build_gptneox>(*this, params, gf);
  10299. } break;
  10300. case LLM_ARCH_ARCTIC:
  10301. {
  10302. llm = std::make_unique<llm_build_arctic>(*this, params, gf);
  10303. } break;
  10304. case LLM_ARCH_DEEPSEEK:
  10305. {
  10306. llm = std::make_unique<llm_build_deepseek>(*this, params, gf);
  10307. } break;
  10308. case LLM_ARCH_DEEPSEEK2:
  10309. {
  10310. llm = std::make_unique<llm_build_deepseek2>(*this, params, gf);
  10311. } break;
  10312. case LLM_ARCH_CHATGLM:
  10313. {
  10314. llm = std::make_unique<llm_build_chatglm>(*this, params, gf);
  10315. } break;
  10316. case LLM_ARCH_GLM4:
  10317. {
  10318. llm = std::make_unique<llm_build_glm4>(*this, params, gf);
  10319. } break;
  10320. case LLM_ARCH_BITNET:
  10321. {
  10322. llm = std::make_unique<llm_build_bitnet>(*this, params, gf);
  10323. } break;
  10324. case LLM_ARCH_T5:
  10325. {
  10326. switch (type) {
  10327. case LLM_GRAPH_TYPE_ENCODER:
  10328. llm = std::make_unique<llm_build_t5_enc>(*this, params, gf);
  10329. break;
  10330. case LLM_GRAPH_TYPE_DEFAULT:
  10331. case LLM_GRAPH_TYPE_DECODER:
  10332. llm = std::make_unique<llm_build_t5_dec>(*this, params, gf);
  10333. break;
  10334. default:
  10335. GGML_ABORT("invalid graph type");
  10336. };
  10337. } break;
  10338. case LLM_ARCH_T5ENCODER:
  10339. {
  10340. llm = std::make_unique<llm_build_t5_enc>(*this, params, gf);
  10341. }
  10342. break;
  10343. case LLM_ARCH_JAIS:
  10344. {
  10345. llm = std::make_unique<llm_build_jais>(*this, params, gf);
  10346. } break;
  10347. case LLM_ARCH_NEMOTRON:
  10348. {
  10349. llm = std::make_unique<llm_build_nemotron>(*this, params, gf);
  10350. } break;
  10351. case LLM_ARCH_EXAONE:
  10352. {
  10353. llm = std::make_unique<llm_build_exaone>(*this, params, gf);
  10354. } break;
  10355. case LLM_ARCH_RWKV6:
  10356. {
  10357. llm = std::make_unique<llm_build_rwkv6>(*this, params, gf);
  10358. } break;
  10359. case LLM_ARCH_RWKV6QWEN2:
  10360. {
  10361. llm = std::make_unique<llm_build_rwkv6qwen2>(*this, params, gf);
  10362. } break;
  10363. case LLM_ARCH_RWKV7:
  10364. {
  10365. llm = std::make_unique<llm_build_rwkv7>(*this, params, gf);
  10366. } break;
  10367. case LLM_ARCH_ARWKV7:
  10368. {
  10369. llm = std::make_unique<llm_build_arwkv7>(*this, params, gf);
  10370. } break;
  10371. case LLM_ARCH_CHAMELEON:
  10372. {
  10373. llm = std::make_unique<llm_build_chameleon>(*this, params, gf);
  10374. } break;
  10375. case LLM_ARCH_WAVTOKENIZER_DEC:
  10376. {
  10377. llm = std::make_unique<llm_build_wavtokenizer_dec>(*this, params, gf);
  10378. } break;
  10379. case LLM_ARCH_PLM:
  10380. {
  10381. llm = std::make_unique<llm_build_plm>(*this, params, gf);
  10382. } break;
  10383. case LLM_ARCH_BAILINGMOE:
  10384. {
  10385. llm = std::make_unique<llm_build_bailingmoe>(*this, params, gf);
  10386. } break;
  10387. default:
  10388. GGML_ABORT("fatal error");
  10389. }
  10390. // add on pooling layer
  10391. llm->build_pooling(gf, cls, cls_b, cls_out, cls_out_b);
  10392. return std::move(llm->res);
  10393. }
  10394. //
  10395. // interface implementation
  10396. //
  10397. llama_model_params llama_model_default_params() {
  10398. llama_model_params result = {
  10399. /*.devices =*/ nullptr,
  10400. /*.tensor_buft_overrides =*/ nullptr,
  10401. /*.n_gpu_layers =*/ 0,
  10402. /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER,
  10403. /*.main_gpu =*/ 0,
  10404. /*.tensor_split =*/ nullptr,
  10405. /*.progress_callback =*/ nullptr,
  10406. /*.progress_callback_user_data =*/ nullptr,
  10407. /*.kv_overrides =*/ nullptr,
  10408. /*.vocab_only =*/ false,
  10409. /*.use_mmap =*/ true,
  10410. /*.use_mlock =*/ false,
  10411. /*.check_tensors =*/ false,
  10412. };
  10413. #ifdef GGML_USE_METAL
  10414. // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
  10415. result.n_gpu_layers = 999;
  10416. #endif
  10417. return result;
  10418. }
  10419. const llama_vocab * llama_model_get_vocab(const llama_model * model) {
  10420. return &model->vocab;
  10421. }
  10422. void llama_free_model(llama_model * model) {
  10423. llama_model_free(model);
  10424. }
  10425. void llama_model_free(llama_model * model) {
  10426. delete model;
  10427. }
  10428. int32_t llama_model_n_ctx_train(const llama_model * model) {
  10429. return model->hparams.n_ctx_train;
  10430. }
  10431. int32_t llama_model_n_embd(const llama_model * model) {
  10432. return model->hparams.n_embd;
  10433. }
  10434. int32_t llama_model_n_layer(const llama_model * model) {
  10435. return model->hparams.n_layer;
  10436. }
  10437. int32_t llama_model_n_head(const llama_model * model) {
  10438. return model->hparams.n_head();
  10439. }
  10440. int32_t llama_model_n_head_kv(const llama_model * model) {
  10441. return model->hparams.n_head_kv();
  10442. }
  10443. // deprecated
  10444. int32_t llama_n_ctx_train(const llama_model * model) {
  10445. return llama_model_n_ctx_train(model);
  10446. }
  10447. // deprecated
  10448. int32_t llama_n_embd(const llama_model * model) {
  10449. return llama_model_n_embd(model);
  10450. }
  10451. // deprecated
  10452. int32_t llama_n_layer(const llama_model * model) {
  10453. return llama_model_n_layer(model);
  10454. }
  10455. // deprecated
  10456. int32_t llama_n_head(const llama_model * model) {
  10457. return llama_model_n_head(model);
  10458. }
  10459. llama_rope_type llama_model_rope_type(const llama_model * model) {
  10460. switch (model->arch) {
  10461. // these models do not use RoPE
  10462. case LLM_ARCH_GPT2:
  10463. case LLM_ARCH_GPTJ:
  10464. case LLM_ARCH_MPT:
  10465. case LLM_ARCH_REFACT:
  10466. case LLM_ARCH_BLOOM:
  10467. case LLM_ARCH_MAMBA:
  10468. case LLM_ARCH_JINA_BERT_V2:
  10469. case LLM_ARCH_T5:
  10470. case LLM_ARCH_T5ENCODER:
  10471. case LLM_ARCH_JAIS:
  10472. case LLM_ARCH_RWKV6:
  10473. case LLM_ARCH_RWKV6QWEN2:
  10474. case LLM_ARCH_RWKV7:
  10475. case LLM_ARCH_ARWKV7:
  10476. case LLM_ARCH_WAVTOKENIZER_DEC:
  10477. return LLAMA_ROPE_TYPE_NONE;
  10478. // use what we call a normal RoPE, operating on pairs of consecutive head values
  10479. case LLM_ARCH_LLAMA:
  10480. case LLM_ARCH_LLAMA4:
  10481. case LLM_ARCH_DECI:
  10482. case LLM_ARCH_BAICHUAN:
  10483. case LLM_ARCH_STARCODER:
  10484. case LLM_ARCH_PLAMO:
  10485. case LLM_ARCH_ORION:
  10486. case LLM_ARCH_INTERNLM2:
  10487. case LLM_ARCH_MINICPM:
  10488. case LLM_ARCH_XVERSE:
  10489. case LLM_ARCH_COMMAND_R:
  10490. case LLM_ARCH_COHERE2:
  10491. case LLM_ARCH_OLMO:
  10492. case LLM_ARCH_ARCTIC:
  10493. case LLM_ARCH_DEEPSEEK:
  10494. case LLM_ARCH_DEEPSEEK2:
  10495. case LLM_ARCH_PLM:
  10496. case LLM_ARCH_CHATGLM:
  10497. case LLM_ARCH_GLM4:
  10498. case LLM_ARCH_GRANITE:
  10499. case LLM_ARCH_GRANITE_MOE:
  10500. case LLM_ARCH_CHAMELEON:
  10501. case LLM_ARCH_BAILINGMOE:
  10502. return LLAMA_ROPE_TYPE_NORM;
  10503. // the pairs of head values are offset by n_rot/2
  10504. case LLM_ARCH_FALCON:
  10505. case LLM_ARCH_GROK:
  10506. case LLM_ARCH_DBRX:
  10507. case LLM_ARCH_BERT:
  10508. case LLM_ARCH_NOMIC_BERT:
  10509. case LLM_ARCH_STABLELM:
  10510. case LLM_ARCH_BITNET:
  10511. case LLM_ARCH_QWEN:
  10512. case LLM_ARCH_QWEN2:
  10513. case LLM_ARCH_QWEN2MOE:
  10514. case LLM_ARCH_QWEN3:
  10515. case LLM_ARCH_QWEN3MOE:
  10516. case LLM_ARCH_OLMO2:
  10517. case LLM_ARCH_OLMOE:
  10518. case LLM_ARCH_PHI2:
  10519. case LLM_ARCH_PHI3:
  10520. case LLM_ARCH_PHIMOE:
  10521. case LLM_ARCH_GEMMA:
  10522. case LLM_ARCH_GEMMA2:
  10523. case LLM_ARCH_GEMMA3:
  10524. case LLM_ARCH_STARCODER2:
  10525. case LLM_ARCH_OPENELM:
  10526. case LLM_ARCH_GPTNEOX:
  10527. case LLM_ARCH_CODESHELL:
  10528. case LLM_ARCH_NEMOTRON:
  10529. case LLM_ARCH_EXAONE:
  10530. case LLM_ARCH_MINICPM3:
  10531. return LLAMA_ROPE_TYPE_NEOX;
  10532. case LLM_ARCH_QWEN2VL:
  10533. return LLAMA_ROPE_TYPE_MROPE;
  10534. // all model arches should be listed explicitly here
  10535. case LLM_ARCH_UNKNOWN:
  10536. GGML_ABORT("unknown architecture");
  10537. }
  10538. return LLAMA_ROPE_TYPE_NONE;
  10539. }
  10540. float llama_model_rope_freq_scale_train(const llama_model * model) {
  10541. return model->hparams.rope_freq_scale_train;
  10542. }
  10543. int32_t llama_model_meta_val_str(const llama_model * model, const char * key, char * buf, size_t buf_size) {
  10544. const auto & it = model->gguf_kv.find(key);
  10545. if (it == model->gguf_kv.end()) {
  10546. if (buf_size > 0) {
  10547. buf[0] = '\0';
  10548. }
  10549. return -1;
  10550. }
  10551. return snprintf(buf, buf_size, "%s", it->second.c_str());
  10552. }
  10553. int32_t llama_model_meta_count(const llama_model * model) {
  10554. return (int)model->gguf_kv.size();
  10555. }
  10556. int32_t llama_model_meta_key_by_index(const llama_model * model, int i, char * buf, size_t buf_size) {
  10557. if (i < 0 || i >= (int)model->gguf_kv.size()) {
  10558. if (buf_size > 0) {
  10559. buf[0] = '\0';
  10560. }
  10561. return -1;
  10562. }
  10563. auto it = model->gguf_kv.begin();
  10564. std::advance(it, i);
  10565. return snprintf(buf, buf_size, "%s", it->first.c_str());
  10566. }
  10567. int32_t llama_model_meta_val_str_by_index(const llama_model * model, int32_t i, char * buf, size_t buf_size) {
  10568. if (i < 0 || i >= (int)model->gguf_kv.size()) {
  10569. if (buf_size > 0) {
  10570. buf[0] = '\0';
  10571. }
  10572. return -1;
  10573. }
  10574. auto it = model->gguf_kv.begin();
  10575. std::advance(it, i);
  10576. return snprintf(buf, buf_size, "%s", it->second.c_str());
  10577. }
  10578. int32_t llama_model_desc(const llama_model * model, char * buf, size_t buf_size) {
  10579. return snprintf(buf, buf_size, "%s", model->desc().c_str());
  10580. }
  10581. uint64_t llama_model_size(const llama_model * model) {
  10582. return model->size();
  10583. }
  10584. const char * llama_model_chat_template(const llama_model * model, const char * name) {
  10585. const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE_N)
  10586. : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE);
  10587. const auto & it = model->gguf_kv.find(key);
  10588. if (it == model->gguf_kv.end()) {
  10589. return nullptr;
  10590. }
  10591. return it->second.c_str();
  10592. }
  10593. uint64_t llama_model_n_params(const llama_model * model) {
  10594. return model->n_elements();
  10595. }
  10596. bool llama_model_has_encoder(const llama_model * model) {
  10597. switch (model->arch) {
  10598. case LLM_ARCH_T5: return true;
  10599. case LLM_ARCH_T5ENCODER: return true;
  10600. default: return false;
  10601. }
  10602. }
  10603. bool llama_model_has_decoder(const llama_model * model) {
  10604. switch (model->arch) {
  10605. case LLM_ARCH_T5ENCODER: return false;
  10606. default: return true;
  10607. }
  10608. }
  10609. llama_token llama_model_decoder_start_token(const llama_model * model) {
  10610. return model->hparams.dec_start_token_id;
  10611. }
  10612. bool llama_model_is_recurrent(const llama_model * model) {
  10613. switch (model->arch) {
  10614. case LLM_ARCH_MAMBA: return true;
  10615. case LLM_ARCH_RWKV6: return true;
  10616. case LLM_ARCH_RWKV6QWEN2: return true;
  10617. case LLM_ARCH_RWKV7: return true;
  10618. case LLM_ARCH_ARWKV7: return true;
  10619. default: return false;
  10620. }
  10621. }
  10622. const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
  10623. return model->tensors_by_name;
  10624. }