llama.cpp

#define LLAMA_API_INTERNAL
#include "llama.h"

#include "unicode.h"

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

#ifdef GGML_USE_CUBLAS
#  include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#  include "ggml-opencl.h"
#elif defined(GGML_USE_VULKAN)
#  include "ggml-vulkan.h"
#elif defined(GGML_USE_SYCL)
#  include "ggml-sycl.h"
#elif defined(GGML_USE_KOMPUTE)
#  include "ggml-kompute.h"
#endif

#ifdef GGML_USE_METAL
#  include "ggml-metal.h"
#endif
#ifdef GGML_USE_MPI
#  include "ggml-mpi.h"
#endif

#ifndef QK_K
#  ifdef GGML_QKK_64
#    define QK_K 64
#  else
#    define QK_K 256
#  endif
#endif

#ifdef __has_include
#if __has_include(<unistd.h>)
#include <unistd.h>
#if defined(_POSIX_MAPPED_FILES)
#include <sys/mman.h>
#include <fcntl.h>
#endif
#if defined(_POSIX_MEMLOCK_RANGE)
#include <sys/resource.h>
#endif
#endif
#endif

#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <io.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cfloat>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <forward_list>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <queue>
#include <random>
#include <regex>
#include <set>
#include <sstream>
#include <thread>
#include <type_traits>
#include <unordered_map>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif

#define LLAMA_MAX_NODES   8192
#define LLAMA_MAX_EXPERTS 8

//
// logging
//

LLAMA_ATTRIBUTE_FORMAT(2, 3)
static void llama_log_internal        (ggml_log_level level, const char * format, ...);
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
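// The logging macros forward printf-style arguments to llama_log_internal, e.g.
//     LLAMA_LOG_INFO("%s: using %d threads\n", __func__, n_threads);
// (the message text and n_threads above are illustrative, not taken from this file).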
//
// helpers
//

static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
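// Note on utf8_len: the lookup table is indexed by the high nibble of the lead byte,
// so 0x0-0xB map to 1, 0xC-0xD to 2, 0xE to 3 and 0xF to 4. For example,
// utf8_len('\xE2') returns 3, matching a 3-byte sequence such as U+20AC.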
static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    std::string result;
    for (size_t pos = 0; ; pos += search.length()) {
        auto new_pos = s.find(search, pos);
        if (new_pos == std::string::npos) {
            result += s.substr(pos, s.size() - pos);
            break;
        }
        result += s.substr(pos, new_pos - pos) + replace;
        pos = new_pos;
    }
    s = std::move(result);
}
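// Note on replace_all: replaces every occurrence of `search` in `s` in one left-to-right pass, e.g.
//     replace_all(word, "\xe2\x96\x81", " "); // SentencePiece "▁" marker -> space
// (the variable name `word` above is illustrative).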
static bool is_float_close(float a, float b, float abs_tol) {
    // Check for non-negative tolerance
    if (abs_tol < 0.0) {
        throw std::invalid_argument("Tolerance must be non-negative");
    }

    // Exact equality check
    if (a == b) {
        return true;
    }

    // Check for infinities
    if (std::isinf(a) || std::isinf(b)) {
        return false;
    }

    // Regular comparison using the provided absolute tolerance
    return std::fabs(b - a) <= abs_tol;
}
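// e.g. is_float_close(1.0f, 1.0f + 1e-7f, 1e-6f) == true, while
//      is_float_close(1.0f, INFINITY,     1e-6f) == false.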
static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}
LLAMA_ATTRIBUTE_FORMAT(1, 2)
static std::string format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap); // the first vsnprintf pass consumes ap, so keep a copy for the second pass
    int size = vsnprintf(NULL, 0, fmt, ap); // first pass: measure the required length
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); // second pass: write the formatted text
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}
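// Example usage: std::string msg = format("n_ctx = %d", 4096); // -> "n_ctx = 4096"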
//
// gguf constants (sync with gguf.py)
//

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_PERSIMMON,
    LLM_ARCH_REFACT,
    LLM_ARCH_BERT,
    LLM_ARCH_NOMIC_BERT,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_QWEN2,
    LLM_ARCH_PHI2,
    LLM_ARCH_PLAMO,
    LLM_ARCH_CODESHELL,
    LLM_ARCH_ORION,
    LLM_ARCH_INTERNLM2,
    LLM_ARCH_MINICPM,
    LLM_ARCH_UNKNOWN,
};
  189. static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
  190. { LLM_ARCH_LLAMA, "llama" },
  191. { LLM_ARCH_FALCON, "falcon" },
  192. { LLM_ARCH_GPT2, "gpt2" },
  193. { LLM_ARCH_GPTJ, "gptj" },
  194. { LLM_ARCH_GPTNEOX, "gptneox" },
  195. { LLM_ARCH_MPT, "mpt" },
  196. { LLM_ARCH_BAICHUAN, "baichuan" },
  197. { LLM_ARCH_STARCODER, "starcoder" },
  198. { LLM_ARCH_PERSIMMON, "persimmon" },
  199. { LLM_ARCH_REFACT, "refact" },
  200. { LLM_ARCH_BERT, "bert" },
  201. { LLM_ARCH_NOMIC_BERT, "nomic-bert" },
  202. { LLM_ARCH_BLOOM, "bloom" },
  203. { LLM_ARCH_STABLELM, "stablelm" },
  204. { LLM_ARCH_QWEN, "qwen" },
  205. { LLM_ARCH_QWEN2, "qwen2" },
  206. { LLM_ARCH_PHI2, "phi2" },
  207. { LLM_ARCH_PLAMO, "plamo" },
  208. { LLM_ARCH_CODESHELL, "codeshell" },
  209. { LLM_ARCH_ORION, "orion" },
  210. { LLM_ARCH_INTERNLM2, "internlm2" },
  211. { LLM_ARCH_MINICPM, "minicpm" },
  212. };
  213. enum llm_kv {
  214. LLM_KV_GENERAL_ARCHITECTURE,
  215. LLM_KV_GENERAL_QUANTIZATION_VERSION,
  216. LLM_KV_GENERAL_ALIGNMENT,
  217. LLM_KV_GENERAL_NAME,
  218. LLM_KV_GENERAL_AUTHOR,
  219. LLM_KV_GENERAL_URL,
  220. LLM_KV_GENERAL_DESCRIPTION,
  221. LLM_KV_GENERAL_LICENSE,
  222. LLM_KV_GENERAL_SOURCE_URL,
  223. LLM_KV_GENERAL_SOURCE_HF_REPO,
  224. LLM_KV_CONTEXT_LENGTH,
  225. LLM_KV_EMBEDDING_LENGTH,
  226. LLM_KV_BLOCK_COUNT,
  227. LLM_KV_FEED_FORWARD_LENGTH,
  228. LLM_KV_USE_PARALLEL_RESIDUAL,
  229. LLM_KV_TENSOR_DATA_LAYOUT,
  230. LLM_KV_EXPERT_COUNT,
  231. LLM_KV_EXPERT_USED_COUNT,
  232. LLM_KV_POOLING_LAYER,
  233. LLM_KV_ATTENTION_HEAD_COUNT,
  234. LLM_KV_ATTENTION_HEAD_COUNT_KV,
  235. LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
  236. LLM_KV_ATTENTION_CLAMP_KQV,
  237. LLM_KV_ATTENTION_KEY_LENGTH,
  238. LLM_KV_ATTENTION_VALUE_LENGTH,
  239. LLM_KV_ATTENTION_LAYERNORM_EPS,
  240. LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
  241. LLM_KV_ATTENTION_CAUSAL,
  242. LLM_KV_ROPE_DIMENSION_COUNT,
  243. LLM_KV_ROPE_FREQ_BASE,
  244. LLM_KV_ROPE_SCALE_LINEAR,
  245. LLM_KV_ROPE_SCALING_TYPE,
  246. LLM_KV_ROPE_SCALING_FACTOR,
  247. LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
  248. LLM_KV_ROPE_SCALING_FINETUNED,
  249. LLM_KV_TOKENIZER_MODEL,
  250. LLM_KV_TOKENIZER_LIST,
  251. LLM_KV_TOKENIZER_TOKEN_TYPE,
  252. LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
  253. LLM_KV_TOKENIZER_SCORES,
  254. LLM_KV_TOKENIZER_MERGES,
  255. LLM_KV_TOKENIZER_BOS_ID,
  256. LLM_KV_TOKENIZER_EOS_ID,
  257. LLM_KV_TOKENIZER_UNK_ID,
  258. LLM_KV_TOKENIZER_SEP_ID,
  259. LLM_KV_TOKENIZER_PAD_ID,
  260. LLM_KV_TOKENIZER_ADD_BOS,
  261. LLM_KV_TOKENIZER_ADD_EOS,
  262. LLM_KV_TOKENIZER_ADD_PREFIX,
  263. LLM_KV_TOKENIZER_HF_JSON,
  264. LLM_KV_TOKENIZER_RWKV,
  265. };
  266. static std::map<llm_kv, const char *> LLM_KV_NAMES = {
  267. { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
  268. { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
  269. { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
  270. { LLM_KV_GENERAL_NAME, "general.name" },
  271. { LLM_KV_GENERAL_AUTHOR, "general.author" },
  272. { LLM_KV_GENERAL_URL, "general.url" },
  273. { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
  274. { LLM_KV_GENERAL_LICENSE, "general.license" },
  275. { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
  276. { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },
  277. { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
  278. { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
  279. { LLM_KV_BLOCK_COUNT, "%s.block_count" },
  280. { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
  281. { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
  282. { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
  283. { LLM_KV_EXPERT_COUNT, "%s.expert_count" },
  284. { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" },
  285. { LLM_KV_POOLING_LAYER, "%s.pooling_layer" },
  286. { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
  287. { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
  288. { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
  289. { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
  290. { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" },
  291. { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" },
  292. { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
  293. { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
  294. { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" },
  295. { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
  296. { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
  297. { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
  298. { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
  299. { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
  300. { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
  301. { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },
  302. { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
  303. { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
  304. { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
  305. { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, "tokenizer.ggml.token_type_count" },
  306. { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
  307. { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
  308. { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
  309. { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
  310. { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
  311. { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
  312. { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
  313. { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
  314. { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
  315. { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" },
  316. { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
  317. { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
  318. };
  319. struct LLM_KV {
  320. LLM_KV(llm_arch arch) : arch(arch) {}
  321. llm_arch arch;
  322. std::string operator()(llm_kv kv) const {
  323. return ::format(LLM_KV_NAMES[kv], LLM_ARCH_NAMES[arch]);
  324. }
  325. };
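// [editorial note, illustrative usage, not part of the original source]
//   const auto kv = LLM_KV(LLM_ARCH_LLAMA);
//   kv(LLM_KV_CONTEXT_LENGTH)    // -> "llama.context_length"
//   kv(LLM_KV_TOKENIZER_BOS_ID)  // -> "tokenizer.ggml.bos_token_id" (keys without "%s" are returned as-is)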
  326. enum llm_tensor {
  327. LLM_TENSOR_TOKEN_EMBD,
  328. LLM_TENSOR_TOKEN_EMBD_NORM,
  329. LLM_TENSOR_TOKEN_TYPES,
  330. LLM_TENSOR_POS_EMBD,
  331. LLM_TENSOR_OUTPUT,
  332. LLM_TENSOR_OUTPUT_NORM,
  333. LLM_TENSOR_ROPE_FREQS,
  334. LLM_TENSOR_ATTN_Q,
  335. LLM_TENSOR_ATTN_K,
  336. LLM_TENSOR_ATTN_V,
  337. LLM_TENSOR_ATTN_QKV,
  338. LLM_TENSOR_ATTN_OUT,
  339. LLM_TENSOR_ATTN_NORM,
  340. LLM_TENSOR_ATTN_NORM_2,
  341. LLM_TENSOR_ATTN_OUT_NORM,
  342. LLM_TENSOR_ATTN_ROT_EMBD,
  343. LLM_TENSOR_FFN_GATE_INP,
  344. LLM_TENSOR_FFN_NORM,
  345. LLM_TENSOR_FFN_GATE,
  346. LLM_TENSOR_FFN_DOWN,
  347. LLM_TENSOR_FFN_UP,
  348. LLM_TENSOR_FFN_ACT,
  349. LLM_TENSOR_FFN_DOWN_EXP,
  350. LLM_TENSOR_FFN_GATE_EXP,
  351. LLM_TENSOR_FFN_UP_EXP,
  352. LLM_TENSOR_ATTN_Q_NORM,
  353. LLM_TENSOR_ATTN_K_NORM,
  354. LLM_TENSOR_LAYER_OUT_NORM,
  355. };
  356. static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
  357. {
  358. LLM_ARCH_LLAMA,
  359. {
  360. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  361. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  362. { LLM_TENSOR_OUTPUT, "output" },
  363. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  364. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  365. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  366. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  367. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  368. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  369. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  370. { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
  371. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  372. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  373. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  374. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  375. { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
  376. { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
  377. { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
  378. },
  379. },
  380. {
  381. LLM_ARCH_BAICHUAN,
  382. {
  383. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  384. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  385. { LLM_TENSOR_OUTPUT, "output" },
  386. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  387. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  388. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  389. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  390. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  391. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  392. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  393. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  394. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  395. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  396. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  397. },
  398. },
  399. {
  400. LLM_ARCH_FALCON,
  401. {
  402. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  403. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  404. { LLM_TENSOR_OUTPUT, "output" },
  405. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  406. { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
  407. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  408. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  409. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  410. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  411. },
  412. },
  413. {
  414. LLM_ARCH_GPT2,
  415. {
  416. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  417. { LLM_TENSOR_POS_EMBD, "position_embd" },
  418. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  419. { LLM_TENSOR_OUTPUT, "output" },
  420. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  421. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  422. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  423. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  424. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  425. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  426. },
  427. },
  428. {
  429. LLM_ARCH_GPTJ,
  430. {
  431. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  432. },
  433. },
  434. {
  435. LLM_ARCH_GPTNEOX,
  436. {
  437. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  438. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  439. { LLM_TENSOR_OUTPUT, "output" },
  440. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  441. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  442. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  443. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  444. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  445. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  446. },
  447. },
  448. {
  449. LLM_ARCH_PERSIMMON,
  450. {
451. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
452. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
453. { LLM_TENSOR_OUTPUT, "output" },
454. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
455. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
456. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
457. { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
458. { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
459. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
460. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
461. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
462. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  463. },
  464. },
  465. {
  466. LLM_ARCH_MPT,
  467. {
  468. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  469. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  470. { LLM_TENSOR_OUTPUT, "output" },
  471. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  472. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  473. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  474. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  475. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  476. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  477. { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
  478. },
  479. },
  480. {
  481. LLM_ARCH_STARCODER,
  482. {
  483. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  484. { LLM_TENSOR_POS_EMBD, "position_embd" },
  485. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  486. { LLM_TENSOR_OUTPUT, "output" },
  487. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  488. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  489. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  490. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  491. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  492. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  493. },
  494. },
  495. {
  496. LLM_ARCH_REFACT,
  497. {
  498. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  499. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  500. { LLM_TENSOR_OUTPUT, "output" },
  501. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  502. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  503. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  504. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  505. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  506. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  507. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  508. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  509. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  510. },
  511. },
  512. {
  513. LLM_ARCH_BERT,
  514. {
  515. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  516. { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
  517. { LLM_TENSOR_TOKEN_TYPES, "token_types" },
  518. { LLM_TENSOR_POS_EMBD, "position_embd" },
  519. { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
  520. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  521. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  522. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  523. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  524. { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
  525. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  526. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  527. },
  528. },
  529. {
  530. LLM_ARCH_NOMIC_BERT,
  531. {
  532. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  533. { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
  534. { LLM_TENSOR_TOKEN_TYPES, "token_types" },
  535. { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
  536. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  537. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  538. { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
  539. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  540. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  541. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  542. },
  543. },
  544. {
  545. LLM_ARCH_BLOOM,
  546. {
  547. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  548. { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
  549. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  550. { LLM_TENSOR_OUTPUT, "output" },
  551. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  552. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  553. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  554. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  555. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  556. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  557. },
  558. },
  559. {
  560. LLM_ARCH_STABLELM,
  561. {
  562. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  563. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  564. { LLM_TENSOR_OUTPUT, "output" },
  565. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  566. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  567. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  568. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  569. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  570. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  571. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  572. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  573. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  574. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  575. },
  576. },
  577. {
  578. LLM_ARCH_QWEN,
  579. {
  580. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  581. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  582. { LLM_TENSOR_OUTPUT, "output" },
  583. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  584. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  585. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  586. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  587. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  588. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  589. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  590. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  591. },
  592. },
  593. {
  594. LLM_ARCH_QWEN2,
  595. {
  596. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  597. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  598. { LLM_TENSOR_OUTPUT, "output" },
  599. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  600. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  601. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  602. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  603. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  604. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  605. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  606. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  607. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  608. },
  609. },
  610. {
  611. LLM_ARCH_PHI2,
  612. {
  613. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  614. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  615. { LLM_TENSOR_OUTPUT, "output" },
  616. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  617. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  618. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  619. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  620. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  621. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  622. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  623. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  624. },
  625. },
  626. {
  627. LLM_ARCH_PLAMO,
  628. {
  629. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  630. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  631. { LLM_TENSOR_OUTPUT, "output" },
  632. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  633. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  634. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  635. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  636. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  637. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  638. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  639. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  640. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  641. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  642. },
  643. },
  644. {
  645. LLM_ARCH_CODESHELL,
  646. {
  647. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  648. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  649. { LLM_TENSOR_OUTPUT, "output" },
  650. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  651. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  652. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  653. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  654. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  655. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  656. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  657. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  658. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  659. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  660. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  661. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  662. },
  663. },
  664. {
  665. LLM_ARCH_ORION,
  666. {
  667. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  668. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  669. { LLM_TENSOR_OUTPUT, "output" },
  670. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  671. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  672. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  673. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  674. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  675. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  676. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  677. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  678. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  679. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  680. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  681. },
  682. },
  683. {
  684. LLM_ARCH_INTERNLM2,
  685. {
  686. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  687. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  688. { LLM_TENSOR_OUTPUT, "output" },
  689. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  690. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  691. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  692. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  693. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  694. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  695. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  696. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  697. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  698. },
  699. },
  700. {
  701. LLM_ARCH_MINICPM,
  702. {
  703. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  704. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  705. { LLM_TENSOR_OUTPUT, "output" },
  706. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  707. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  708. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  709. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  710. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  711. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  712. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  713. { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
  714. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  715. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  716. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  717. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  718. { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
  719. { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
  720. { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
  721. },
  722. },
  723. {
  724. LLM_ARCH_UNKNOWN,
  725. {
  726. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  727. },
  728. },
  729. };
  730. static llm_arch llm_arch_from_string(const std::string & name) {
  731. for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
  732. if (kv.second == name) {
  733. return kv.first;
  734. }
  735. }
  736. return LLM_ARCH_UNKNOWN;
  737. }
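// [editorial note, illustrative usage, not part of the original source]
//   llm_arch_from_string("falcon")       // -> LLM_ARCH_FALCON
//   llm_arch_from_string("not-an-arch")  // -> LLM_ARCH_UNKNOWN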
  738. // helper to handle gguf constants
  739. // usage:
  740. //
  741. // const auto tn = LLM_TN(LLM_ARCH_LLAMA);
  742. //
  743. // std::string name = tn(LLM_TENSOR_OUTPUT); -> "output"
  744. // std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias"
  745. // std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
  746. //
  747. struct LLM_TN {
  748. LLM_TN(llm_arch arch) : arch(arch) {}
  749. llm_arch arch;
  750. std::string operator()(llm_tensor tensor) const {
  751. if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
  752. return "__missing__";
  753. }
  754. return LLM_TENSOR_NAMES[arch].at(tensor);
  755. }
  756. std::string operator()(llm_tensor tensor, const std::string & suffix) const {
  757. if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
  758. return "__missing__";
  759. }
  760. return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
  761. }
  762. std::string operator()(llm_tensor tensor, int bid) const {
  763. if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
  764. return "__missing__";
  765. }
  766. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
  767. }
  768. std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
  769. if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
  770. return "__missing__";
  771. }
  772. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
  773. }
  774. std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
  775. if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
  776. return "__missing__";
  777. }
  778. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid, xid) + "." + suffix;
  779. }
  780. };
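// [editorial note, not part of the original source] when the selected architecture does not define a
// given tensor, the operators above return the sentinel string "__missing__" instead of formatting a
// name, so callers can detect unsupported tensors without an exception. MoE expert tensors take both
// a block and an expert index, e.g. for LLM_ARCH_LLAMA:
//   tn(LLM_TENSOR_FFN_UP_EXP, "weight", 2, 5)  // -> "blk.2.ffn_up.5.weight"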
  781. //
  782. // gguf helpers
  783. //
  784. static std::map<int32_t, const char *> LLAMA_ROPE_SCALING_TYPES = {
  785. { LLAMA_ROPE_SCALING_NONE, "none" },
  786. { LLAMA_ROPE_SCALING_LINEAR, "linear" },
  787. { LLAMA_ROPE_SCALING_YARN, "yarn" },
  788. };
  789. static int32_t llama_rope_scaling_type_from_string(const std::string & name) {
  790. for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
  791. if (kv.second == name) {
  792. return kv.first;
  793. }
  794. }
  795. return LLAMA_ROPE_SCALING_UNSPECIFIED;
  796. }
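// [editorial note, illustrative usage, not part of the original source]
//   llama_rope_scaling_type_from_string("yarn")   // -> LLAMA_ROPE_SCALING_YARN
//   llama_rope_scaling_type_from_string("bogus")  // -> LLAMA_ROPE_SCALING_UNSPECIFIED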
  797. static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
  798. switch (type) {
  799. case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
  800. case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
  801. case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
  802. case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
  803. case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
  804. case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
  805. case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
  806. case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
  807. case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
  808. case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
  809. case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
  810. default: return format("unknown type %d", type);
  811. }
  812. }
  813. static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
  814. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  815. switch (type) {
  816. case GGUF_TYPE_STRING:
  817. return gguf_get_val_str(ctx_gguf, i);
  818. case GGUF_TYPE_ARRAY:
  819. {
  820. const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
  821. int arr_n = gguf_get_arr_n(ctx_gguf, i);
  822. const void * data = gguf_get_arr_data(ctx_gguf, i);
  823. std::stringstream ss;
  824. ss << "[";
  825. for (int j = 0; j < arr_n; j++) {
  826. if (arr_type == GGUF_TYPE_STRING) {
  827. std::string val = gguf_get_arr_str(ctx_gguf, i, j);
828. // escape backslashes and quotes
  829. replace_all(val, "\\", "\\\\");
  830. replace_all(val, "\"", "\\\"");
  831. ss << '"' << val << '"';
  832. } else if (arr_type == GGUF_TYPE_ARRAY) {
  833. ss << "???";
  834. } else {
  835. ss << gguf_data_to_str(arr_type, data, j);
  836. }
  837. if (j < arr_n - 1) {
  838. ss << ", ";
  839. }
  840. }
  841. ss << "]";
  842. return ss.str();
  843. }
  844. default:
  845. return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
  846. }
  847. }
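// [editorial note, not part of the original source] gguf_kv_to_str() renders any KV pair as display
// text: scalars via gguf_data_to_str(), arrays as a JSON-like list (string elements are quoted with
// backslashes and quotes escaped), and nested arrays as the placeholder "???" since they are not supported.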
  848. //
  849. // ggml helpers
  850. //
  851. static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
  852. struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
  853. if (plan.work_size > 0) {
  854. buf.resize(plan.work_size);
  855. plan.work_data = buf.data();
  856. }
  857. ggml_graph_compute(graph, &plan);
  858. }
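// [editorial note, not part of the original source] the helper above first asks ggml for a compute plan,
// which reports the scratch work_size required for n_threads, then grows the caller-provided buffer to
// that size and points the plan at it before running the graph, so the same buffer can be reused across
// evaluations instead of being reallocated every call.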
  859. //
  860. // llama helpers
  861. //
  862. #if defined(_WIN32)
  863. static std::string llama_format_win_err(DWORD err) {
  864. LPSTR buf;
  865. size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
  866. NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
  867. if (!size) {
  868. return "FormatMessageA failed";
  869. }
  870. std::string ret(buf, size);
  871. LocalFree(buf);
  872. return ret;
  873. }
  874. #endif
  875. template <typename T>
  876. struct no_init {
  877. T value;
  878. no_init() { /* do nothing */ }
  879. };
  880. struct llama_file {
  881. // use FILE * so we don't have to re-open the file to mmap
  882. FILE * fp;
  883. size_t size;
  884. llama_file(const char * fname, const char * mode) {
  885. fp = std::fopen(fname, mode);
  886. if (fp == NULL) {
  887. throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
  888. }
  889. seek(0, SEEK_END);
  890. size = tell();
  891. seek(0, SEEK_SET);
  892. }
  893. size_t tell() const {
  894. #ifdef _WIN32
  895. __int64 ret = _ftelli64(fp);
  896. #else
  897. long ret = std::ftell(fp);
  898. #endif
  899. GGML_ASSERT(ret != -1); // this really shouldn't fail
  900. return (size_t) ret;
  901. }
  902. void seek(size_t offset, int whence) const {
  903. #ifdef _WIN32
  904. int ret = _fseeki64(fp, (__int64) offset, whence);
  905. #else
  906. int ret = std::fseek(fp, (long) offset, whence);
  907. #endif
908. GGML_ASSERT(ret == 0); // same as tell(): this really shouldn't fail
  909. }
  910. void read_raw(void * ptr, size_t len) const {
  911. if (len == 0) {
  912. return;
  913. }
  914. errno = 0;
  915. std::size_t ret = std::fread(ptr, len, 1, fp);
  916. if (ferror(fp)) {
  917. throw std::runtime_error(format("read error: %s", strerror(errno)));
  918. }
  919. if (ret != 1) {
  920. throw std::runtime_error("unexpectedly reached end of file");
  921. }
  922. }
  923. uint32_t read_u32() const {
  924. uint32_t ret;
  925. read_raw(&ret, sizeof(ret));
  926. return ret;
  927. }
  928. void write_raw(const void * ptr, size_t len) const {
  929. if (len == 0) {
  930. return;
  931. }
  932. errno = 0;
  933. size_t ret = std::fwrite(ptr, len, 1, fp);
  934. if (ret != 1) {
  935. throw std::runtime_error(format("write error: %s", strerror(errno)));
  936. }
  937. }
  938. void write_u32(std::uint32_t val) const {
  939. write_raw(&val, sizeof(val));
  940. }
  941. ~llama_file() {
  942. if (fp) {
  943. std::fclose(fp);
  944. }
  945. }
  946. };
  947. struct llama_mmap {
  948. void * addr;
  949. size_t size;
  950. llama_mmap(const llama_mmap &) = delete;
  951. #ifdef _POSIX_MAPPED_FILES
  952. static constexpr bool SUPPORTED = true;
  953. // list of mapped fragments (first_offset, last_offset)
  954. std::vector<std::pair<size_t, size_t>> mapped_fragments;
  955. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
  956. size = file->size;
  957. int fd = fileno(file->fp);
  958. int flags = MAP_SHARED;
  959. // prefetch/readahead impairs performance on NUMA systems
  960. if (numa) { prefetch = 0; }
  961. #ifdef __linux__
  962. // advise the kernel to read the file sequentially (increases readahead)
  963. if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
  964. LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
  965. strerror(errno));
  966. }
  967. if (prefetch) { flags |= MAP_POPULATE; }
  968. #endif
  969. addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
  970. if (addr == MAP_FAILED) { // NOLINT
  971. throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
  972. }
  973. if (prefetch > 0) {
  974. // advise the kernel to preload the mapped memory
  975. if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
  976. LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
  977. strerror(errno));
  978. }
  979. }
  980. if (numa) {
  981. // advise the kernel not to use readahead
  982. // (because the next page might not belong on the same node)
  983. if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
  984. LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
  985. strerror(errno));
  986. }
  987. }
  988. // initialize list of mapped_fragments
  989. mapped_fragments.emplace_back(0, file->size);
  990. }
  991. static void align_range(size_t * first, size_t * last, size_t page_size) {
  992. // align first to the next page
  993. size_t offset_in_page = *first & (page_size - 1);
  994. size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
  995. *first += offset_to_page;
  996. // align last to the previous page
  997. *last = *last & ~(page_size - 1);
  998. if (*last <= *first) {
  999. *last = *first;
  1000. }
  1001. }
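// [editorial note, illustrative example, not part of the original source] with page_size = 4096,
// align_range() shrinks the range inward to whole pages: first = 100, last = 8193 becomes
// first = 4096, last = 8192; a range smaller than one page collapses to last == first (nothing to unmap).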
  1002. // partially unmap the file in the range [first, last)
  1003. void unmap_fragment(size_t first, size_t last) {
  1004. // note: this function must not be called multiple times with overlapping ranges
  1005. // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
  1006. int page_size = sysconf(_SC_PAGESIZE);
  1007. align_range(&first, &last, page_size);
  1008. size_t len = last - first;
  1009. if (len == 0) {
  1010. return;
  1011. }
  1012. GGML_ASSERT(first % page_size == 0);
  1013. GGML_ASSERT(last % page_size == 0);
  1014. GGML_ASSERT(last > first);
  1015. void * next_page_start = (uint8_t *) addr + first;
  1016. // unmap the range
  1017. if (munmap(next_page_start, len)) {
  1018. LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
  1019. }
  1020. // update the list of mapped fragments to avoid unmapping the same range again in the destructor
  1021. std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
  1022. for (const auto & frag : mapped_fragments) {
  1023. if (frag.first < first && frag.second > last) {
  1024. // the range is in the middle of the fragment, split it
  1025. new_mapped_fragments.emplace_back(frag.first, first);
  1026. new_mapped_fragments.emplace_back(last, frag.second);
  1027. } else if (frag.first < first && frag.second > first) {
  1028. // the range starts in the middle of the fragment
  1029. new_mapped_fragments.emplace_back(frag.first, first);
  1030. } else if (frag.first < last && frag.second > last) {
  1031. // the range ends in the middle of the fragment
  1032. new_mapped_fragments.emplace_back(last, frag.second);
  1033. } else if (frag.first >= first && frag.second <= last) {
  1034. // the range covers the entire fragment
  1035. } else {
  1036. // the range is outside the fragment
  1037. new_mapped_fragments.push_back(frag);
  1038. }
  1039. }
  1040. mapped_fragments = std::move(new_mapped_fragments);
  1041. }
  1042. ~llama_mmap() {
  1043. for (const auto & frag : mapped_fragments) {
  1044. if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
  1045. LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
  1046. }
  1047. }
  1048. }
  1049. #elif defined(_WIN32)
  1050. static constexpr bool SUPPORTED = true;
  1051. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
  1052. GGML_UNUSED(numa);
  1053. size = file->size;
  1054. HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
  1055. HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
  1056. if (hMapping == NULL) {
  1057. DWORD error = GetLastError();
  1058. throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
  1059. }
  1060. addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
  1061. DWORD error = GetLastError();
  1062. CloseHandle(hMapping);
  1063. if (addr == NULL) {
  1064. throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
  1065. }
  1066. if (prefetch > 0) {
  1067. #if _WIN32_WINNT >= 0x602
  1068. // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
  1069. BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
  1070. HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
  1071. // may fail on pre-Windows 8 systems
  1072. pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
  1073. if (pPrefetchVirtualMemory) {
  1074. // advise the kernel to preload the mapped memory
  1075. WIN32_MEMORY_RANGE_ENTRY range;
  1076. range.VirtualAddress = addr;
  1077. range.NumberOfBytes = (SIZE_T) std::min(size, prefetch);
  1078. if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
  1079. LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
  1080. llama_format_win_err(GetLastError()).c_str());
  1081. }
  1082. }
  1083. #else
  1084. throw std::runtime_error("PrefetchVirtualMemory unavailable");
  1085. #endif
  1086. }
  1087. }
  1088. void unmap_fragment(size_t first, size_t last) {
  1089. // not supported
  1090. GGML_UNUSED(first);
  1091. GGML_UNUSED(last);
  1092. }
  1093. ~llama_mmap() {
  1094. if (!UnmapViewOfFile(addr)) {
  1095. LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
  1096. llama_format_win_err(GetLastError()).c_str());
  1097. }
  1098. }
  1099. #else
  1100. static constexpr bool SUPPORTED = false;
  1101. llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) {
  1102. GGML_UNUSED(file);
  1103. GGML_UNUSED(prefetch);
  1104. GGML_UNUSED(numa);
  1105. throw std::runtime_error("mmap not supported");
  1106. }
  1107. void unmap_fragment(size_t first, size_t last) {
  1108. GGML_UNUSED(first);
  1109. GGML_UNUSED(last);
  1110. throw std::runtime_error("mmap not supported");
  1111. }
  1112. #endif
  1113. };
  1114. // Represents some region of memory being locked using mlock or VirtualLock;
  1115. // will automatically unlock on destruction.
  1116. struct llama_mlock {
  1117. void * addr = NULL;
  1118. size_t size = 0;
  1119. bool failed_already = false;
  1120. llama_mlock() {}
  1121. llama_mlock(const llama_mlock &) = delete;
  1122. ~llama_mlock() {
  1123. if (size) {
  1124. raw_unlock(addr, size);
  1125. }
  1126. }
  1127. void init(void * ptr) {
  1128. GGML_ASSERT(addr == NULL && size == 0); // NOLINT
  1129. addr = ptr;
  1130. }
  1131. void grow_to(size_t target_size) {
  1132. GGML_ASSERT(addr);
  1133. if (failed_already) {
  1134. return;
  1135. }
  1136. size_t granularity = lock_granularity();
  1137. target_size = (target_size + granularity - 1) & ~(granularity - 1);
  1138. if (target_size > size) {
  1139. if (raw_lock((uint8_t *) addr + size, target_size - size)) {
  1140. size = target_size;
  1141. } else {
  1142. failed_already = true;
  1143. }
  1144. }
  1145. }
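// [editorial note, illustrative example, not part of the original source] grow_to() rounds the requested
// size up to the lock granularity and only locks the newly added tail, e.g. with a 4096-byte page size a
// request for 5000 bytes locks up to 8192 bytes from the start of the region.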
  1146. #ifdef _POSIX_MEMLOCK_RANGE
  1147. static constexpr bool SUPPORTED = true;
  1148. static size_t lock_granularity() {
  1149. return (size_t) sysconf(_SC_PAGESIZE);
  1150. }
  1151. #ifdef __APPLE__
  1152. #define MLOCK_SUGGESTION \
  1153. "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
  1154. "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n"
  1155. #else
  1156. #define MLOCK_SUGGESTION \
  1157. "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n"
  1158. #endif
  1159. bool raw_lock(const void * addr, size_t size) const {
  1160. if (!mlock(addr, size)) {
  1161. return true;
  1162. }
  1163. char* errmsg = std::strerror(errno);
  1164. bool suggest = (errno == ENOMEM);
  1165. // Check if the resource limit is fine after all
  1166. struct rlimit lock_limit;
  1167. if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
  1168. suggest = false;
  1169. }
  1170. if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
  1171. suggest = false;
  1172. }
  1173. LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
  1174. size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
  1175. return false;
  1176. }
  1177. #undef MLOCK_SUGGESTION
  1178. static void raw_unlock(void * addr, size_t size) {
  1179. if (munlock(addr, size)) {
  1180. LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno));
  1181. }
  1182. }
  1183. #elif defined(_WIN32)
  1184. static constexpr bool SUPPORTED = true;
  1185. static size_t lock_granularity() {
  1186. SYSTEM_INFO si;
  1187. GetSystemInfo(&si);
  1188. return (size_t) si.dwPageSize;
  1189. }
  1190. bool raw_lock(void * ptr, size_t len) const {
  1191. for (int tries = 1; ; tries++) {
  1192. if (VirtualLock(ptr, len)) {
  1193. return true;
  1194. }
  1195. if (tries == 2) {
  1196. LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
  1197. len, size, llama_format_win_err(GetLastError()).c_str());
  1198. return false;
  1199. }
  1200. // It failed but this was only the first try; increase the working
  1201. // set size and try again.
  1202. SIZE_T min_ws_size, max_ws_size;
  1203. if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
  1204. LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n",
  1205. llama_format_win_err(GetLastError()).c_str());
  1206. return false;
  1207. }
  1208. // Per MSDN: "The maximum number of pages that a process can lock
  1209. // is equal to the number of pages in its minimum working set minus
  1210. // a small overhead."
  1211. // Hopefully a megabyte is enough overhead:
  1212. size_t increment = len + 1048576;
  1213. // The minimum must be <= the maximum, so we need to increase both:
  1214. min_ws_size += increment;
  1215. max_ws_size += increment;
  1216. if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
  1217. LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n",
  1218. llama_format_win_err(GetLastError()).c_str());
  1219. return false;
  1220. }
  1221. }
  1222. }
  1223. static void raw_unlock(void * ptr, size_t len) {
  1224. if (!VirtualUnlock(ptr, len)) {
  1225. LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n",
  1226. llama_format_win_err(GetLastError()).c_str());
  1227. }
  1228. }
  1229. #else
  1230. static constexpr bool SUPPORTED = false;
  1231. static size_t lock_granularity() {
  1232. return (size_t) 65536;
  1233. }
  1234. bool raw_lock(const void * addr, size_t len) const {
  1235. LLAMA_LOG_WARN("warning: mlock not supported on this system\n");
  1236. return false;
  1237. }
  1238. static void raw_unlock(const void * addr, size_t len) {}
  1239. #endif
  1240. };
  1241. static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
  1242. std::vector<char> result(8, 0);
  1243. const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
  1244. if (n_tokens < 0) {
  1245. result.resize(-n_tokens);
  1246. int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
  1247. GGML_ASSERT(check == -n_tokens);
  1248. }
  1249. else {
  1250. result.resize(n_tokens);
  1251. }
  1252. return std::string(result.data(), result.size());
  1253. }
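// [editorial note, not part of the original source] the C API call above returns a negative value whose
// magnitude is the required buffer size when the initial 8-byte buffer is too small; the wrapper resizes
// and retries, so callers always receive the complete piece for the token.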
  1254. static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer) {
  1255. ggml_backend_buffer_type_t buft = nullptr;
  1256. #if defined(GGML_USE_CUBLAS)
  1257. // host buffers should only be used when data is expected to be copied to/from the GPU
  1258. if (host_buffer) {
  1259. buft = ggml_backend_cuda_host_buffer_type();
  1260. }
  1261. #elif defined(GGML_USE_SYCL)
  1262. buft = ggml_backend_sycl_host_buffer_type();
  1263. #elif defined(GGML_USE_CPU_HBM)
  1264. buft = ggml_backend_cpu_hbm_buffer_type();
  1265. #elif defined(GGML_USE_VULKAN)
  1266. if (host_buffer) {
  1267. buft = ggml_backend_vk_host_buffer_type();
  1268. }
  1269. #endif
  1270. if (buft == nullptr) {
  1271. buft = ggml_backend_cpu_buffer_type();
  1272. }
  1273. return buft;
  1274. GGML_UNUSED(host_buffer);
  1275. }
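// [editorial note, not part of the original source] a "host" buffer here is CPU memory allocated by the
// GPU backend (typically pinned/page-locked) so that transfers to and from the device are faster; it is
// only requested when data is expected to be copied to/from the GPU, otherwise the plain CPU buffer type
// is used as the fallback.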
  1276. static ggml_backend_buffer_type_t llama_default_buffer_type_offload(int gpu) {
  1277. ggml_backend_buffer_type_t buft = nullptr;
  1278. #ifdef GGML_USE_METAL
  1279. buft = ggml_backend_metal_buffer_type();
  1280. #elif defined(GGML_USE_CUBLAS)
  1281. buft = ggml_backend_cuda_buffer_type(gpu);
  1282. #elif defined(GGML_USE_VULKAN)
  1283. buft = ggml_backend_vk_buffer_type(gpu);
  1284. #elif defined(GGML_USE_SYCL)
  1285. buft = ggml_backend_sycl_buffer_type(gpu);
  1286. #elif defined(GGML_USE_CLBLAST)
  1287. buft = ggml_backend_opencl_buffer_type();
  1288. #elif defined(GGML_USE_KOMPUTE)
  1289. buft = ggml_backend_kompute_buffer_type(gpu);
  1290. if (buft == nullptr) {
  1291. LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, gpu);
  1292. }
  1293. #endif
  1294. if (buft == nullptr) {
  1295. buft = llama_default_buffer_type_cpu(true);
  1296. }
  1297. return buft;
  1298. GGML_UNUSED(gpu);
  1299. }
  1300. static ggml_backend_buffer_type_t llama_default_buffer_type_split(int fallback_gpu, const float * tensor_split) {
  1301. ggml_backend_buffer_type_t buft = nullptr;
  1302. #ifdef GGML_USE_CUBLAS
  1303. if (ggml_backend_cuda_get_device_count() > 1) {
  1304. buft = ggml_backend_cuda_split_buffer_type(tensor_split);
  1305. }
  1306. #endif
  1307. if (buft == nullptr) {
  1308. buft = llama_default_buffer_type_offload(fallback_gpu);
  1309. }
  1310. return buft;
  1311. GGML_UNUSED(tensor_split);
  1312. }
  1313. static size_t llama_get_device_count() {
  1314. #if defined(GGML_USE_CUBLAS)
  1315. return ggml_backend_cuda_get_device_count();
  1316. #elif defined(GGML_USE_VULKAN)
  1317. return ggml_backend_vk_get_device_count();
  1318. #else
  1319. return 1;
  1320. #endif
  1321. }
  1322. static size_t llama_get_device_memory(int device) {
  1323. #if defined(GGML_USE_CUBLAS)
  1324. size_t total;
  1325. size_t free;
  1326. ggml_backend_cuda_get_device_memory(device, &total, &free);
  1327. return free;
  1328. #elif defined(GGML_USE_VULKAN)
  1329. size_t total;
  1330. size_t free;
  1331. ggml_backend_vk_get_device_memory(device, &total, &free);
  1332. return free;
  1333. #else
  1334. return 1;
  1335. GGML_UNUSED(device);
  1336. #endif
  1337. }
  1338. //
  1339. // globals
  1340. //
  1341. struct llama_state {
  1342. llama_state() {
  1343. #ifdef GGML_USE_METAL
  1344. ggml_backend_metal_log_set_callback(log_callback, log_callback_user_data);
  1345. #endif
  1346. }
  1347. // We save the log callback globally
  1348. ggml_log_callback log_callback = llama_log_callback_default;
  1349. void * log_callback_user_data = nullptr;
  1350. };
  1351. static llama_state g_state;
  1352. // available llama models
  1353. enum e_model {
  1354. MODEL_UNKNOWN,
  1355. MODEL_17M,
  1356. MODEL_22M,
  1357. MODEL_33M,
  1358. MODEL_109M,
  1359. MODEL_137M,
  1360. MODEL_335M,
  1361. MODEL_0_5B,
  1362. MODEL_1B,
  1363. MODEL_2B,
  1364. MODEL_3B,
  1365. MODEL_4B,
  1366. MODEL_7B,
  1367. MODEL_8B,
  1368. MODEL_13B,
  1369. MODEL_14B,
  1370. MODEL_15B,
  1371. MODEL_20B,
  1372. MODEL_30B,
  1373. MODEL_34B,
  1374. MODEL_40B,
  1375. MODEL_65B,
  1376. MODEL_70B,
  1377. MODEL_SMALL,
  1378. MODEL_MEDIUM,
  1379. MODEL_LARGE,
  1380. MODEL_XL,
  1381. };
  1382. static const size_t kiB = 1024;
  1383. static const size_t MiB = 1024*kiB;
  1384. static const size_t GiB = 1024*MiB;
  1385. struct llama_hparams {
  1386. bool vocab_only;
  1387. bool rope_finetuned;
  1388. uint32_t n_vocab;
  1389. uint32_t n_ctx_train; // context size the model was trained on
  1390. uint32_t n_embd;
  1391. uint32_t n_head;
  1392. uint32_t n_head_kv;
  1393. uint32_t n_layer;
  1394. uint32_t n_rot;
  1395. uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
  1396. uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
  1397. uint32_t n_ff;
  1398. uint32_t n_expert = 0;
  1399. uint32_t n_expert_used = 0;
  1400. uint32_t n_vocab_type = 0; // for BERT-style token types
  1401. float f_norm_eps;
  1402. float f_norm_rms_eps;
  1403. float rope_freq_base_train;
  1404. float rope_freq_scale_train;
  1405. uint32_t n_yarn_orig_ctx;
  1406. int32_t rope_scaling_type_train;
  1407. float f_clamp_kqv;
  1408. float f_max_alibi_bias;
  1409. bool causal_attn = true;
  1410. bool pooling_layer = false;
  1411. bool operator!=(const llama_hparams & other) const {
  1412. if (this->vocab_only != other.vocab_only) return true;
  1413. if (this->n_vocab != other.n_vocab) return true;
  1414. if (this->n_ctx_train != other.n_ctx_train) return true;
  1415. if (this->n_embd != other.n_embd) return true;
  1416. if (this->n_head != other.n_head) return true;
  1417. if (this->n_head_kv != other.n_head_kv) return true;
  1418. if (this->n_layer != other.n_layer) return true;
  1419. if (this->n_rot != other.n_rot) return true;
  1420. if (this->n_embd_head_k != other.n_embd_head_k) return true;
  1421. if (this->n_embd_head_v != other.n_embd_head_v) return true;
  1422. if (this->n_ff != other.n_ff) return true;
  1423. if (this->n_expert != other.n_expert) return true;
  1424. if (this->n_expert_used != other.n_expert_used) return true;
  1425. if (this->rope_finetuned != other.rope_finetuned) return true;
  1426. if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;
  1427. const float EPSILON = 1e-9f;
  1428. if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true;
  1429. if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true;
  1430. if (!is_float_close(this->rope_freq_base_train, other.rope_freq_base_train, EPSILON)) return true;
  1431. if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;
  1432. return false;
  1433. }
  1434. uint32_t n_gqa() const {
  1435. return n_head/n_head_kv;
  1436. }
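// [editorial note, not part of the original source] e.g. n_head = 32, n_head_kv = 8 -> n_gqa() == 4,
// i.e. four query heads share each k/v head (grouped-query attention)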
  1437. uint32_t n_embd_k_gqa() const { // dimension of key embeddings across all k-v heads
  1438. return n_embd_head_k * n_head_kv;
  1439. }
  1440. uint32_t n_embd_v_gqa() const { // dimension of value embeddings across all k-v heads
  1441. return n_embd_head_v * n_head_kv;
  1442. }
  1443. };
  1444. struct llama_cparams {
  1445. uint32_t n_ctx; // context size used during inference
  1446. uint32_t n_batch;
  1447. uint32_t n_threads; // number of threads to use for generation
  1448. uint32_t n_threads_batch; // number of threads to use for batch processing
  1449. float rope_freq_base;
  1450. float rope_freq_scale;
  1451. uint32_t n_yarn_orig_ctx;
  1452. // These hyperparameters are not exposed in GGUF, because all
  1453. // existing YaRN models use the same values for them.
  1454. float yarn_ext_factor;
  1455. float yarn_attn_factor;
  1456. float yarn_beta_fast;
  1457. float yarn_beta_slow;
  1458. bool mul_mat_q;
  1459. bool offload_kqv;
  1460. bool do_pooling;
  1461. ggml_backend_sched_eval_callback cb_eval;
  1462. void * cb_eval_user_data;
  1463. };
  1464. struct llama_layer {
  1465. // normalization
  1466. struct ggml_tensor * attn_norm;
  1467. struct ggml_tensor * attn_norm_b;
  1468. struct ggml_tensor * attn_norm_2;
  1469. struct ggml_tensor * attn_norm_2_b;
  1470. struct ggml_tensor * attn_q_norm;
  1471. struct ggml_tensor * attn_q_norm_b;
  1472. struct ggml_tensor * attn_k_norm;
  1473. struct ggml_tensor * attn_k_norm_b;
  1474. struct ggml_tensor * attn_out_norm;
  1475. struct ggml_tensor * attn_out_norm_b;
  1476. // attention
  1477. struct ggml_tensor * wq;
  1478. struct ggml_tensor * wk;
  1479. struct ggml_tensor * wv;
  1480. struct ggml_tensor * wo;
  1481. struct ggml_tensor * wqkv;
  1482. // attention bias
  1483. struct ggml_tensor * bq;
  1484. struct ggml_tensor * bk;
  1485. struct ggml_tensor * bv;
  1486. struct ggml_tensor * bo;
  1487. struct ggml_tensor * bqkv;
  1488. // normalization
  1489. struct ggml_tensor * ffn_norm;
  1490. struct ggml_tensor * ffn_norm_b;
  1491. struct ggml_tensor * layer_out_norm;
  1492. struct ggml_tensor * layer_out_norm_b;
  1493. // ff
  1494. struct ggml_tensor * ffn_gate; // w1
  1495. struct ggml_tensor * ffn_down; // w2
  1496. struct ggml_tensor * ffn_up; // w3
  1497. // ff MoE
  1498. struct ggml_tensor * ffn_gate_inp;
  1499. struct ggml_tensor * ffn_gate_exp[LLAMA_MAX_EXPERTS];
  1500. struct ggml_tensor * ffn_down_exp[LLAMA_MAX_EXPERTS];
  1501. struct ggml_tensor * ffn_up_exp [LLAMA_MAX_EXPERTS];
  1502. // ff bias
  1503. struct ggml_tensor * ffn_down_b; // b2
  1504. struct ggml_tensor * ffn_up_b; // b3
  1505. struct ggml_tensor * ffn_act;
  1506. };
  1507. struct llama_kv_cell {
  1508. llama_pos pos = -1;
  1509. llama_pos delta = 0;
  1510. std::set<llama_seq_id> seq_id;
  1511. bool has_seq_id(const llama_seq_id & id) const {
  1512. return seq_id.find(id) != seq_id.end();
  1513. }
  1514. };
  1515. // ring-buffer of cached KV data
  1516. struct llama_kv_cache {
  1517. bool has_shift = false;
  1518. // Note: The value of head isn't only used to optimize searching
  1519. // for a free KV slot. llama_decode_internal also uses it, so it
  1520. // cannot be freely changed after a slot has been allocated.
  1521. uint32_t head = 0;
  1522. uint32_t size = 0;
  1523. uint32_t used = 0; // used cells (i.e. at least one seq_id)
  1524. // computed before each graph build
  1525. uint32_t n = 0;
  1526. std::vector<llama_kv_cell> cells;
  1527. std::vector<struct ggml_tensor *> k_l; // per layer
  1528. std::vector<struct ggml_tensor *> v_l;
  1529. std::vector<struct ggml_context *> ctxs;
  1530. std::vector<ggml_backend_buffer_t> bufs;
  1531. size_t total_size() const {
  1532. size_t size = 0;
  1533. for (ggml_backend_buffer_t buf : bufs) {
  1534. size += ggml_backend_buffer_get_size(buf);
  1535. }
  1536. return size;
  1537. }
  1538. ~llama_kv_cache() {
  1539. for (struct ggml_context * ctx : ctxs) {
  1540. ggml_free(ctx);
  1541. }
  1542. for (ggml_backend_buffer_t buf : bufs) {
  1543. ggml_backend_buffer_free(buf);
  1544. }
  1545. }
  1546. };
  1547. struct llama_vocab {
  1548. using id = int32_t;
  1549. using token = std::string;
  1550. using ttype = llama_token_type;
  1551. struct token_data {
  1552. token text;
  1553. float score;
  1554. ttype type;
  1555. };
  1556. enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;
  1557. std::unordered_map<token, id> token_to_id;
  1558. std::vector<token_data> id_to_token;
  1559. std::unordered_map<token, id> special_tokens_cache;
  1560. std::map<std::pair<std::string, std::string>, int> bpe_ranks;
  1561. // default LLaMA special tokens
  1562. id special_bos_id = 1;
  1563. id special_eos_id = 2;
  1564. id special_unk_id = 0;
  1565. id special_sep_id = -1;
  1566. id special_pad_id = -1;
  1567. int special_add_bos = -1; // -1 unknown, 1 add, 0 don't add.
  1568. int special_add_eos = -1; // -1 unknown, 1 add, 0 don't add.
  1569. id linefeed_id = 13;
  1570. id special_prefix_id = 32007;
  1571. id special_middle_id = 32009;
  1572. id special_suffix_id = 32008;
  1573. id special_eot_id = 32010;
  1574. bool add_space_prefix = true;
  1575. int find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
  1576. GGML_ASSERT(token_left.find(' ') == std::string::npos);
  1577. GGML_ASSERT(token_left.find('\n') == std::string::npos);
  1578. GGML_ASSERT(token_right.find(' ') == std::string::npos);
  1579. GGML_ASSERT(token_right.find('\n') == std::string::npos);
  1580. auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
  1581. if (it == bpe_ranks.end()) {
  1582. return -1;
  1583. }
  1584. return it->second;
  1585. }
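// [editorial note, not part of the original source] bpe_ranks maps a merge pair (left token, right token)
// to its merge priority; find_bpe_rank() returns -1 when the pair is not a known merge, and lower ranks
// correspond to merges applied earlier during BPE tokenization.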
  1586. };
  1587. struct llama_model {
  1588. e_model type = MODEL_UNKNOWN;
  1589. llm_arch arch = LLM_ARCH_UNKNOWN;
  1590. llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
  1591. std::string name = "n/a";
  1592. llama_hparams hparams = {};
  1593. llama_vocab vocab;
  1594. struct ggml_tensor * tok_embd;
  1595. struct ggml_tensor * type_embd;
  1596. struct ggml_tensor * pos_embd;
  1597. struct ggml_tensor * tok_norm;
  1598. struct ggml_tensor * tok_norm_b;
  1599. struct ggml_tensor * output_norm;
  1600. struct ggml_tensor * output_norm_b;
  1601. struct ggml_tensor * output;
  1602. struct ggml_tensor * output_b;
  1603. std::vector<llama_layer> layers;
  1604. llama_split_mode split_mode;
  1605. int main_gpu;
  1606. int n_gpu_layers;
  1607. // gguf metadata
  1608. std::unordered_map<std::string, std::string> gguf_kv;
  1609. // layer -> buffer type mapping
  1610. struct layer_buft {
  1611. layer_buft() : buft_matrix(nullptr), buft(nullptr) {}
  1612. layer_buft(ggml_backend_buffer_type_t matrix) : buft_matrix(matrix), buft(matrix) {}
  1613. layer_buft(ggml_backend_buffer_type_t matrix, ggml_backend_buffer_type_t other) : buft_matrix(matrix), buft(other) {}
  1614. ggml_backend_buffer_type_t buft_matrix; // matrices only - used by split buffers and backends that support only matrix multiplication
  1615. ggml_backend_buffer_type_t buft; // everything else
  1616. };
  1617. layer_buft buft_input;
  1618. layer_buft buft_output;
  1619. std::vector<layer_buft> buft_layer;
  1620. // contexts where the model tensors metadata is stored
  1621. std::vector<struct ggml_context *> ctxs;
  1622. // the model memory buffers for the tensor data
  1623. std::vector<ggml_backend_buffer_t> bufs;
  1624. // model memory mapped file
  1625. std::unique_ptr<llama_mmap> mapping;
  1626. // objects representing data potentially being locked in memory
  1627. std::vector<std::unique_ptr<llama_mlock>> mlock_bufs;
  1628. llama_mlock mlock_mmap;
  1629. // for quantize-stats only
  1630. std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
  1631. int64_t t_load_us = 0;
  1632. int64_t t_start_us = 0;
  1633. ~llama_model() {
  1634. for (struct ggml_context * ctx : ctxs) {
  1635. ggml_free(ctx);
  1636. }
  1637. for (ggml_backend_buffer_t buf : bufs) {
  1638. ggml_backend_buffer_free(buf);
  1639. }
  1640. }
  1641. };
  1642. struct llama_context {
  1643. llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
  1644. ~llama_context() {
  1645. ggml_backend_sched_free(sched);
  1646. for (ggml_backend_t backend : backends) {
  1647. ggml_backend_free(backend);
  1648. }
  1649. #ifdef GGML_USE_VULKAN
  1650. ggml_vk_free_cpu_assist();
  1651. #endif
  1652. ggml_backend_buffer_free(buf_input);
  1653. ggml_free(ctx_input);
  1654. }
  1655. llama_cparams cparams;
  1656. std::vector<ggml_backend_t> backends;
  1657. #ifdef GGML_USE_METAL
  1658. ggml_backend_t backend_metal = nullptr;
  1659. #endif
  1660. ggml_backend_t backend_cpu = nullptr;
  1661. const llama_model & model;
  1662. // key + value cache for the self attention
  1663. struct llama_kv_cache kv_self;
  1664. std::mt19937 rng;
  1665. bool has_evaluated_once = false;
  1666. int64_t t_start_us;
  1667. int64_t t_load_us;
  1668. int64_t t_sample_us = 0;
  1669. int64_t t_p_eval_us = 0;
  1670. int64_t t_eval_us = 0;
  1671. int32_t n_sample = 0; // number of tokens sampled
  1672. int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
  1673. int32_t n_eval = 0; // number of eval calls
  1674. // decode output (2-dimensional array: [n_tokens][n_vocab])
  1675. std::vector<float> logits;
  1676. #ifndef NDEBUG
  1677. // guard against access to unset logits
  1678. std::vector<bool> logits_valid;
  1679. #endif
  1680. bool logits_all = false;
  1681. // input embedding (1-dimensional array: [n_embd])
  1682. std::vector<float> embedding;
  1683. // memory buffers used to evaluate the model
  1684. std::vector<uint8_t> buf_compute_meta;
  1685. ggml_backend_sched_t sched = nullptr;
  1686. // input tensors
  1687. ggml_backend_buffer_t buf_input = nullptr;
  1688. ggml_context * ctx_input = nullptr;
  1689. struct ggml_tensor * inp_tokens; // I32 [n_batch]
  1690. struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
  1691. struct ggml_tensor * inp_pos; // I32 [n_batch]
  1692. struct ggml_tensor * inp_KQ_mask; // F32 [n_ctx, n_batch]
  1693. struct ggml_tensor * inp_K_shift; // I32 [n_ctx]
  1694. struct ggml_tensor * inp_sum; // F32 [n_batch, n_batch]
  1695. #ifdef GGML_USE_MPI
  1696. ggml_mpi_context * ctx_mpi = NULL;
  1697. #endif
  1698. };
  1699. //
  1700. // kv cache helpers
  1701. //
  1702. static bool llama_kv_cache_init(
  1703. struct llama_kv_cache & cache,
  1704. const llama_model & model,
  1705. ggml_type ktype,
  1706. ggml_type vtype,
  1707. uint32_t n_ctx,
  1708. bool offload) {
  1709. const struct llama_hparams & hparams = model.hparams;
  1710. const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  1711. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  1712. const int64_t n_layer = hparams.n_layer;
  1713. cache.has_shift = false;
  1714. cache.head = 0;
  1715. cache.size = n_ctx;
  1716. cache.used = 0;
  1717. cache.cells.clear();
  1718. cache.cells.resize(n_ctx);
  1719. #ifdef GGML_USE_CLBLAST
  1720. offload = false;
  1721. #endif
  1722. // count used buffer types
  1723. std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
  1724. if (offload) {
  1725. for (int64_t i = 0; i < n_layer; ++i) {
  1726. buft_layer_count[model.buft_layer[i].buft]++;
  1727. }
  1728. } else {
  1729. buft_layer_count[llama_default_buffer_type_cpu(true)] = n_layer;
  1730. }
  1731. // create a context for each buffer type
  1732. std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
  1733. for (auto & it : buft_layer_count) {
  1734. int n_layers = it.second;
  1735. struct ggml_init_params params = {
  1736. /*.mem_size =*/ 2u*n_layers*ggml_tensor_overhead(),
  1737. /*.mem_buffer =*/ NULL,
  1738. /*.no_alloc =*/ true,
  1739. };
  1740. ggml_context * ctx = ggml_init(params);
  1741. if (!ctx) {
  1742. LLAMA_LOG_ERROR("%s: failed to allocate context for kv cache\n", __func__);
  1743. return false;
  1744. }
  1745. ctx_map[it.first] = ctx;
  1746. cache.ctxs.push_back(ctx);
  1747. }
  1748. cache.k_l.reserve(n_layer);
  1749. cache.v_l.reserve(n_layer);
  1750. for (int i = 0; i < (int) n_layer; i++) {
  1751. struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
  1752. ggml_tensor * k = ggml_new_tensor_1d(ctx, ktype, n_embd_k_gqa*n_ctx);
  1753. ggml_tensor * v = ggml_new_tensor_1d(ctx, vtype, n_embd_v_gqa*n_ctx);
  1754. ggml_format_name(k, "cache_k_l%d", i);
  1755. ggml_format_name(v, "cache_v_l%d", i);
  1756. cache.k_l.push_back(k);
  1757. cache.v_l.push_back(v);
  1758. }
  1759. // allocate tensors and initialize the buffers to avoid NaNs in the padding
  1760. for (auto it : ctx_map) {
  1761. ggml_backend_buffer_type_t buft = it.first;
  1762. ggml_context * ctx = it.second;
  1763. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
  1764. if (!buf) {
  1765. LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__);
  1766. return false;
  1767. }
  1768. ggml_backend_buffer_clear(buf, 0);
  1769. LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
  1770. cache.bufs.push_back(buf);
  1771. }
  1772. return true;
  1773. }
  1774. // find an empty slot of size "n_tokens" in the cache
  1775. // updates the cache head
  1776. // Note: On success, it's important that cache.head points
  1777. // to the first cell of the slot.
  1778. static bool llama_kv_cache_find_slot(
  1779. struct llama_kv_cache & cache,
  1780. const struct llama_batch & batch) {
  1781. const uint32_t n_ctx = cache.size;
  1782. const uint32_t n_tokens = batch.n_tokens;
  1783. if (n_tokens > n_ctx) {
  1784. LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
  1785. return false;
  1786. }
  1787. uint32_t n_tested = 0;
  1788. while (true) {
  1789. if (cache.head + n_tokens > n_ctx) {
  1790. n_tested += n_ctx - cache.head;
  1791. cache.head = 0;
  1792. continue;
  1793. }
  1794. bool found = true;
  1795. for (uint32_t i = 0; i < n_tokens; i++) {
  1796. if (cache.cells[cache.head + i].pos >= 0) {
  1797. found = false;
  1798. cache.head += i + 1;
  1799. n_tested += i + 1;
  1800. break;
  1801. }
  1802. }
  1803. if (found) {
  1804. break;
  1805. }
  1806. if (n_tested >= n_ctx) {
  1807. //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
  1808. return false;
  1809. }
  1810. }
  1811. for (uint32_t i = 0; i < n_tokens; i++) {
  1812. cache.cells[cache.head + i].pos = batch.pos[i];
  1813. for (int32_t j = 0; j < batch.n_seq_id[i]; j++) {
  1814. cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i][j]);
  1815. }
  1816. }
  1817. cache.used += n_tokens;
  1818. return true;
  1819. }
  1820. // find how many cells are currently in use
  1821. static int32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
  1822. for (uint32_t i = cache.size - 1; i > 0; --i) {
  1823. if (cache.cells[i].pos >= 0 && !cache.cells[i].seq_id.empty()) {
  1824. return i + 1;
  1825. }
  1826. }
  1827. return 0;
  1828. }
  1829. static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
  1830. for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
  1831. cache.cells[i].pos = -1;
  1832. cache.cells[i].seq_id.clear();
  1833. }
  1834. cache.head = 0;
  1835. cache.used = 0;
  1836. }
  1837. static void llama_kv_cache_seq_rm(
  1838. struct llama_kv_cache & cache,
  1839. llama_seq_id seq_id,
  1840. llama_pos p0,
  1841. llama_pos p1) {
  1842. uint32_t new_head = cache.size;
  1843. if (p0 < 0) p0 = 0;
  1844. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1845. for (uint32_t i = 0; i < cache.size; ++i) {
  1846. if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1847. if (seq_id < 0) {
  1848. cache.cells[i].seq_id.clear();
  1849. } else if (cache.cells[i].has_seq_id(seq_id)) {
  1850. cache.cells[i].seq_id.erase(seq_id);
  1851. } else {
  1852. continue;
  1853. }
  1854. if (cache.cells[i].seq_id.empty()) {
  1855. // keep count of the number of used cells
  1856. if (cache.cells[i].pos >= 0) cache.used--;
  1857. cache.cells[i].pos = -1;
  1858. if (new_head == cache.size) new_head = i;
  1859. }
  1860. }
  1861. }
  1862. // If we freed up a slot, set head to it so searching can start there.
  1863. if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
  1864. }
  1865. static void llama_kv_cache_seq_cp(
  1866. struct llama_kv_cache & cache,
  1867. llama_seq_id seq_id_src,
  1868. llama_seq_id seq_id_dst,
  1869. llama_pos p0,
  1870. llama_pos p1) {
  1871. if (p0 < 0) p0 = 0;
  1872. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1873. cache.head = 0;
  1874. for (uint32_t i = 0; i < cache.size; ++i) {
  1875. if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1876. cache.cells[i].seq_id.insert(seq_id_dst);
  1877. }
  1878. }
  1879. }
  1880. static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
  1881. uint32_t new_head = cache.size;
  1882. for (uint32_t i = 0; i < cache.size; ++i) {
  1883. if (!cache.cells[i].has_seq_id(seq_id)) {
  1884. if (cache.cells[i].pos >= 0) cache.used--;
  1885. cache.cells[i].pos = -1;
  1886. cache.cells[i].seq_id.clear();
  1887. if (new_head == cache.size) new_head = i;
  1888. } else {
  1889. cache.cells[i].seq_id.clear();
  1890. cache.cells[i].seq_id.insert(seq_id);
  1891. }
  1892. }
  1893. // If we freed up a slot, set head to it so searching can start there.
  1894. if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
  1895. }
  1896. static void llama_kv_cache_seq_shift(
  1897. struct llama_kv_cache & cache,
  1898. llama_seq_id seq_id,
  1899. llama_pos p0,
  1900. llama_pos p1,
  1901. llama_pos delta) {
  1902. uint32_t new_head = cache.size;
  1903. if (p0 < 0) p0 = 0;
  1904. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1905. for (uint32_t i = 0; i < cache.size; ++i) {
  1906. if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1907. cache.has_shift = true;
  1908. cache.cells[i].pos += delta;
  1909. cache.cells[i].delta += delta;
  1910. if (cache.cells[i].pos < 0) {
  1911. if (!cache.cells[i].seq_id.empty()) cache.used--;
  1912. cache.cells[i].pos = -1;
  1913. cache.cells[i].seq_id.clear();
  1914. if (new_head == cache.size) new_head = i;
  1915. }
  1916. }
  1917. }
  1918. // If we freed up a slot, set head to it so searching can start there.
  1919. // Otherwise we just start the next search from the beginning.
  1920. cache.head = new_head != cache.size ? new_head : 0;
  1921. }
  1922. static void llama_kv_cache_seq_div(
  1923. struct llama_kv_cache & cache,
  1924. llama_seq_id seq_id,
  1925. llama_pos p0,
  1926. llama_pos p1,
  1927. int d) {
  1928. if (p0 < 0) p0 = 0;
  1929. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1930. for (uint32_t i = 0; i < cache.size; ++i) {
  1931. if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1932. cache.has_shift = true;
  1933. {
  1934. llama_pos p_old = cache.cells[i].pos;
  1935. cache.cells[i].pos /= d;
  1936. cache.cells[i].delta += cache.cells[i].pos - p_old;
  1937. }
  1938. }
  1939. }
  1940. }
  1941. //
  1942. // model loading and saving
  1943. //
  1944. enum llama_fver {
  1945. GGUF_FILE_VERSION_V1 = 1,
  1946. GGUF_FILE_VERSION_V2 = 2,
  1947. GGUF_FILE_VERSION_V3 = 3,
  1948. };
  1949. static const char * llama_file_version_name(llama_fver version) {
  1950. switch (version) {
  1951. case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
  1952. case GGUF_FILE_VERSION_V2: return "GGUF V2";
  1953. case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
  1954. }
  1955. return "unknown";
  1956. }
  1957. static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
  1958. char buf[256];
  1959. snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
  1960. for (size_t i = 1; i < ne.size(); i++) {
  1961. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
  1962. }
  1963. return buf;
  1964. }
  1965. static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
  1966. char buf[256];
  1967. snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
  1968. for (int i = 1; i < GGML_MAX_DIMS; i++) {
  1969. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
  1970. }
  1971. return buf;
  1972. }
  1973. namespace GGUFMeta {
  1974. template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
  1975. struct GKV_Base_Type {
  1976. static constexpr gguf_type gt = gt_;
  1977. static T getter(const gguf_context * ctx, const int kid) {
  1978. return gfun(ctx, kid);
  1979. }
  1980. };
  1981. template<typename T> struct GKV_Base;
  1982. template<> struct GKV_Base<bool >: GKV_Base_Type<bool, GGUF_TYPE_BOOL, gguf_get_val_bool> {};
  1983. template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, GGUF_TYPE_UINT8, gguf_get_val_u8 > {};
  1984. template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, GGUF_TYPE_UINT16, gguf_get_val_u16 > {};
  1985. template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, GGUF_TYPE_UINT32, gguf_get_val_u32 > {};
  1986. template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, GGUF_TYPE_UINT64, gguf_get_val_u64 > {};
  1987. template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, GGUF_TYPE_INT8, gguf_get_val_i8 > {};
  1988. template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, GGUF_TYPE_INT16, gguf_get_val_i16 > {};
  1989. template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, GGUF_TYPE_INT32, gguf_get_val_i32 > {};
  1990. template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, GGUF_TYPE_INT64, gguf_get_val_i64 > {};
  1991. template<> struct GKV_Base<float >: GKV_Base_Type<float, GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
  1992. template<> struct GKV_Base<double >: GKV_Base_Type<double, GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
  1993. template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING, gguf_get_val_str > {};
  1994. template<> struct GKV_Base<std::string> {
  1995. static constexpr gguf_type gt = GGUF_TYPE_STRING;
  1996. static std::string getter(const gguf_context * ctx, const int kid) {
  1997. return gguf_get_val_str(ctx, kid);
  1998. }
  1999. };
  2000. struct ArrayInfo{
  2001. const gguf_type gt;
  2002. const size_t length;
  2003. const void * data;
  2004. };
  2005. template<> struct GKV_Base<ArrayInfo> {
  2006. public:
  2007. static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
  2008. static ArrayInfo getter(const gguf_context *ctx, const int k) {
  2009. return ArrayInfo {
  2010. gguf_get_arr_type(ctx, k),
  2011. size_t(gguf_get_arr_n(ctx, k)),
  2012. gguf_get_arr_data(ctx, k),
  2013. };
  2014. }
  2015. };
  2016. template<typename T>
  2017. class GKV: public GKV_Base<T> {
  2018. GKV() = delete;
  2019. public:
  2020. static T get_kv(const gguf_context * ctx, const int k) {
  2021. const enum gguf_type kt = gguf_get_kv_type(ctx, k);
  2022. if (kt != GKV::gt) {
  2023. throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
  2024. gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
  2025. }
  2026. return GKV::getter(ctx, k);
  2027. }
  2028. static const char * override_type_to_str(const llama_model_kv_override_type ty) {
  2029. switch (ty) {
  2030. case LLAMA_KV_OVERRIDE_BOOL: return "bool";
  2031. case LLAMA_KV_OVERRIDE_INT: return "int";
  2032. case LLAMA_KV_OVERRIDE_FLOAT: return "float";
  2033. }
  2034. return "unknown";
  2035. }
  2036. static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override *override) {
  2037. if (!override) { return false; }
  2038. if (override->tag == expected_type) {
  2039. LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
  2040. __func__, override_type_to_str(override->tag), override->key);
  2041. switch (override->tag) {
  2042. case LLAMA_KV_OVERRIDE_BOOL: {
  2043. LLAMA_LOG_INFO("%s\n", override->bool_value ? "true" : "false");
  2044. } break;
  2045. case LLAMA_KV_OVERRIDE_INT: {
  2046. LLAMA_LOG_INFO("%" PRId64 "\n", override->int_value);
  2047. } break;
  2048. case LLAMA_KV_OVERRIDE_FLOAT: {
  2049. LLAMA_LOG_INFO("%.6f\n", override->float_value);
  2050. } break;
  2051. default:
  2052. // Shouldn't be possible to end up here, but just in case...
  2053. throw std::runtime_error(
  2054. format("Unsupported attempt to override %s type for metadata key %s\n",
  2055. override_type_to_str(override->tag), override->key));
  2056. }
  2057. return true;
  2058. }
  2059. LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
  2060. __func__, override->key, override_type_to_str(expected_type), override_type_to_str(override->tag));
  2061. return false;
  2062. }
  2063. template<typename OT>
  2064. static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
  2065. try_override(OT & target, const struct llama_model_kv_override *override) {
  2066. if (validate_override(LLAMA_KV_OVERRIDE_BOOL, override)) {
  2067. target = override->bool_value;
  2068. return true;
  2069. }
  2070. return false;
  2071. }
  2072. template<typename OT>
  2073. static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
  2074. try_override(OT & target, const struct llama_model_kv_override *override) {
  2075. if (validate_override(LLAMA_KV_OVERRIDE_INT, override)) {
  2076. target = override->int_value;
  2077. return true;
  2078. }
  2079. return false;
  2080. }
  2081. template<typename OT>
  2082. static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
  2083. try_override(T & target, const struct llama_model_kv_override *override) {
  2084. if (validate_override(LLAMA_KV_OVERRIDE_FLOAT, override)) {
  2085. target = override->float_value;
  2086. return true;
  2087. }
  2088. return false;
  2089. }
  2090. template<typename OT>
  2091. static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
  2092. try_override(T & target, const struct llama_model_kv_override *override) {
  2093. (void)target;
  2094. (void)override;
  2095. if (!override) { return false; }
  2096. // Currently, we should never end up here so it would be a bug if we do.
  2097. throw std::runtime_error(format("Unsupported attempt to override string type for metadata key %s\n",
  2098. override ? override->key : "NULL"));
  2099. }
  2100. static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override *override = nullptr) {
  2101. if (try_override<T>(target, override)) {
  2102. return true;
  2103. }
  2104. if (k < 0) { return false; }
  2105. target = get_kv(ctx, k);
  2106. return true;
  2107. }
  2108. static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override *override = nullptr) {
  2109. return set(ctx, gguf_find_key(ctx, key), target, override);
  2110. }
  2111. static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override *override = nullptr) {
  2112. return set(ctx, key.c_str(), target, override);
  2113. }
  2114. };
  2115. }
  2116. struct llama_model_loader {
  2117. int n_kv = 0;
  2118. int n_tensors = 0;
  2119. int n_created = 0;
  2120. int64_t n_elements = 0;
  2121. size_t n_bytes = 0;
  2122. bool use_mmap = false;
  2123. llama_file file;
  2124. llama_ftype ftype;
  2125. llama_fver fver;
  2126. std::unique_ptr<llama_mmap> mapping;
  2127. std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
  2128. struct gguf_context * ctx_gguf = NULL;
  2129. struct ggml_context * ctx_meta = NULL;
  2130. std::string arch_name;
  2131. LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
  2132. llama_model_loader(const std::string & fname, bool use_mmap, const struct llama_model_kv_override * param_overrides_p) : file(fname.c_str(), "rb") {
  2133. int trace = 0;
  2134. if (getenv("LLAMA_TRACE")) {
  2135. trace = atoi(getenv("LLAMA_TRACE"));
  2136. }
  2137. struct gguf_init_params params = {
  2138. /*.no_alloc = */ true,
  2139. /*.ctx = */ &ctx_meta,
  2140. };
  2141. if (param_overrides_p != nullptr) {
  2142. for (const struct llama_model_kv_override *p = param_overrides_p; p->key[0] != 0; p++) {
  2143. kv_overrides.insert({std::string(p->key), *p});
  2144. }
  2145. }
  2146. ctx_gguf = gguf_init_from_file(fname.c_str(), params);
  2147. if (!ctx_gguf) {
  2148. throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
  2149. }
  2150. get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
  2151. llm_kv = LLM_KV(llm_arch_from_string(arch_name));
  2152. n_kv = gguf_get_n_kv(ctx_gguf);
  2153. n_tensors = gguf_get_n_tensors(ctx_gguf);
  2154. fver = (enum llama_fver ) gguf_get_version(ctx_gguf);
  2155. for (int i = 0; i < n_tensors; i++) {
  2156. const char * name = gguf_get_tensor_name(ctx_gguf, i);
  2157. struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name);
  2158. n_elements += ggml_nelements(t);
  2159. n_bytes += ggml_nbytes(t);
  2160. }
  2161. LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
  2162. __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
  2163. // determine file type based on the number of tensors for each quantization and print meta data
  2164. // TODO: make optional
  2165. {
  2166. std::map<enum ggml_type, uint32_t> n_type;
  2167. uint32_t n_type_max = 0;
  2168. enum ggml_type type_max = GGML_TYPE_F32;
  2169. for (int i = 0; i < n_tensors; i++) {
  2170. enum ggml_type type = gguf_get_tensor_type(ctx_gguf, i);
  2171. n_type[type]++;
  2172. if (n_type_max < n_type[type]) {
  2173. n_type_max = n_type[type];
  2174. type_max = type;
  2175. }
  2176. if (trace > 0) {
  2177. struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i));
  2178. LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, ggml_get_name(meta), ggml_type_name(type), llama_format_tensor_shape(meta).c_str());
  2179. }
  2180. }
  2181. switch (type_max) {
  2182. case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
  2183. case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
  2184. case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
  2185. case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
  2186. case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
  2187. case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
  2188. case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
  2189. case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
  2190. case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
  2191. case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
  2192. case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
  2193. case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
  2194. case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
  2195. case GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break;
  2196. case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
  2197. default:
  2198. {
  2199. LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
  2200. ftype = LLAMA_FTYPE_ALL_F32;
  2201. } break;
  2202. }
  2203. // this is a way to mark that we have "guessed" the file type
  2204. ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
  2205. {
  2206. const int kid = gguf_find_key(ctx_gguf, "general.file_type");
  2207. if (kid >= 0) {
  2208. ftype = (llama_ftype) gguf_get_val_u32(ctx_gguf, kid);
  2209. }
  2210. }
  2211. LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
  2212. for (int i = 0; i < n_kv; i++) {
  2213. const char * name = gguf_get_key(ctx_gguf, i);
  2214. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  2215. const std::string type_name =
  2216. type == GGUF_TYPE_ARRAY
  2217. ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx_gguf, i)), gguf_get_arr_n(ctx_gguf, i))
  2218. : gguf_type_name(type);
  2219. std::string value = gguf_kv_to_str(ctx_gguf, i);
  2220. const size_t MAX_VALUE_LEN = 40;
  2221. if (value.size() > MAX_VALUE_LEN) {
  2222. value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
  2223. }
  2224. replace_all(value, "\n", "\\n");
  2225. LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
  2226. }
  2227. // print type counts
  2228. for (auto & kv : n_type) {
  2229. if (kv.second == 0) {
  2230. continue;
  2231. }
  2232. LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
  2233. }
  2234. }
  2235. if (!llama_mmap::SUPPORTED) {
  2236. LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
  2237. use_mmap = false;
  2238. }
  2239. this->use_mmap = use_mmap;
  2240. }
  2241. ~llama_model_loader() {
  2242. if (ctx_gguf) {
  2243. gguf_free(ctx_gguf);
  2244. }
  2245. if (ctx_meta) {
  2246. ggml_free(ctx_meta);
  2247. }
  2248. }
  2249. template<typename T>
  2250. typename std::enable_if<std::is_integral<T>::value, bool>::type
  2251. get_arr_n(const std::string & key, T & result, const bool required = true) {
  2252. const int kid = gguf_find_key(ctx_gguf, key.c_str());
  2253. if (kid < 0) {
  2254. if (required) {
  2255. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  2256. }
  2257. return false;
  2258. }
  2259. struct GGUFMeta::ArrayInfo arr_info =
  2260. GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx_gguf, kid);
  2261. result = arr_info.length;
  2262. return true;
  2263. }
  2264. template<typename T>
  2265. typename std::enable_if<std::is_integral<T>::value, bool>::type
  2266. get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
  2267. return get_arr_n(llm_kv(kid), result, required);
  2268. }
  2269. template<typename T>
  2270. bool get_key(const std::string & key, T & result, const bool required = true) {
  2271. auto it = kv_overrides.find(key);
  2272. const struct llama_model_kv_override * override =
  2273. it != kv_overrides.end() ? &it->second : nullptr;
  2274. const bool found = GGUFMeta::GKV<T>::set(ctx_gguf, key, result, override);
  2275. if (required && !found) {
  2276. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  2277. }
  2278. return found;
  2279. }
  2280. template<typename T>
  2281. bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
  2282. return get_key(llm_kv(kid), result, required);
  2283. }
  2284. std::string get_arch_name() const {
  2285. return arch_name;
  2286. }
  2287. enum llm_arch get_arch() const {
  2288. return llm_kv.arch;
  2289. }
  2290. const char * get_tensor_name(int i) const {
  2291. return gguf_get_tensor_name(ctx_gguf, i);
  2292. }
  2293. struct ggml_tensor * get_tensor_meta(const char * name) const {
  2294. return ggml_get_tensor(ctx_meta, name);
  2295. }
  2296. struct ggml_tensor * get_tensor_meta(int i) const {
  2297. return get_tensor_meta(get_tensor_name(i));
  2298. }
  2299. struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta) {
  2300. struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta);
  2301. ggml_set_name(tensor, ggml_get_name(meta));
  2302. n_created++;
  2303. return tensor;
  2304. }
  2305. struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, bool required = true) {
  2306. struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
  2307. if (cur == NULL) {
  2308. if (!required) {
  2309. return NULL;
  2310. }
  2311. throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
  2312. }
  2313. {
  2314. bool is_ok = true;
  2315. for (size_t i = 0; i < ne.size(); ++i) {
  2316. if (ne[i] != cur->ne[i]) {
  2317. is_ok = false;
  2318. break;
  2319. }
  2320. }
  2321. if (!is_ok) {
  2322. throw std::runtime_error(
  2323. format("%s: tensor '%s' has wrong shape; expected %s, got %s",
  2324. __func__, name.c_str(),
  2325. llama_format_tensor_shape(ne).c_str(),
  2326. llama_format_tensor_shape(cur).c_str()));
  2327. }
  2328. }
  2329. return create_tensor_for(ctx, cur);
  2330. }
  2331. void done_getting_tensors() const {
  2332. if (n_created != n_tensors) {
  2333. throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
  2334. }
  2335. }
  2336. size_t file_offset(const char * name) const {
  2337. const int idx = gguf_find_tensor(ctx_gguf, name);
  2338. if (idx < 0) {
  2339. throw std::runtime_error(format("%s: tensor '%s' not found in the file", __func__, name));
  2340. }
  2341. return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx);
  2342. }
  2343. void init_mapping(bool prefetch = true, llama_mlock * lmlock = nullptr) {
  2344. // prefetch the whole file - all the data is needed anyway
  2345. if (use_mmap) {
  2346. mapping.reset(new llama_mmap(&file, prefetch ? -1 : 0, ggml_is_numa()));
  2347. }
  2348. // compute the total size of all tensors for progress reporting
  2349. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  2350. struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i));
  2351. size_data += ggml_nbytes(cur);
  2352. }
  2353. if (use_mmap && mapping) {
  2354. if (lmlock) {
  2355. lmlock->init(mapping->addr);
  2356. }
  2357. mmap_used_first = mapping->size;
  2358. }
  2359. }
  2360. void get_mapping_range(size_t * first, size_t * last, ggml_context * ctx) const {
  2361. GGML_ASSERT(mapping);
  2362. *first = mapping->size;
  2363. *last = 0;
  2364. for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) {
  2365. const size_t offs = file_offset(ggml_get_name(tensor));
  2366. *first = std::min(*first, offs);
  2367. *last = std::max(*last, offs + ggml_nbytes(tensor));
  2368. }
  2369. }
  2370. // for backwards compatibility, does not support ggml-backend
  2371. void load_data_for(struct ggml_tensor * cur) const {
  2372. const size_t offs = file_offset(ggml_get_name(cur));
  2373. if (use_mmap && mapping) {
  2374. if (cur->data == nullptr) {
  2375. cur->data = (uint8_t *)mapping->addr + offs;
  2376. } else {
  2377. memcpy(cur->data, (uint8_t *)mapping->addr + offs, ggml_nbytes(cur));
  2378. }
  2379. } else {
  2380. GGML_ASSERT(cur->data != nullptr);
  2381. file.seek(offs, SEEK_SET);
  2382. file.read_raw(cur->data, ggml_nbytes(cur));
  2383. }
  2384. }
  2385. size_t size_done = 0;
  2386. size_t size_data = 0;
  2387. size_t mmap_used_first = -1;
  2388. size_t mmap_used_last = 0;
  2389. // Returns false if cancelled by progress_callback
  2390. bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) {
  2391. GGML_ASSERT(size_data != 0 && "call init_mapping() first");
  2392. std::vector<no_init<uint8_t>> read_buf;
  2393. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  2394. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  2395. if (!cur) {
  2396. // some tensors may be allocated in a different context
  2397. continue;
  2398. }
  2399. if (progress_callback) {
  2400. if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
  2401. return false;
  2402. }
  2403. }
  2404. const size_t offs = file_offset(ggml_get_name(cur));
  2405. if (use_mmap && mapping) {
  2406. if (buf_mmap && cur->data == nullptr) {
  2407. ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs);
  2408. if (lmlock) {
  2409. lmlock->grow_to(offs + ggml_nbytes(cur));
  2410. }
  2411. mmap_used_first = std::min(mmap_used_first, offs);
  2412. mmap_used_last = std::max(mmap_used_last, offs + ggml_nbytes(cur));
  2413. } else {
  2414. ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur));
  2415. }
  2416. } else {
  2417. if (ggml_backend_buffer_is_host(cur->buffer)) {
  2418. file.seek(offs, SEEK_SET);
  2419. file.read_raw(cur->data, ggml_nbytes(cur));
  2420. } else {
  2421. read_buf.resize(ggml_nbytes(cur));
  2422. file.seek(offs, SEEK_SET);
  2423. file.read_raw(read_buf.data(), ggml_nbytes(cur));
  2424. ggml_backend_tensor_set(cur, read_buf.data(), 0, ggml_nbytes(cur));
  2425. }
  2426. }
  2427. size_done += ggml_nbytes(cur);
  2428. }
  2429. // check if this is the last call and do final cleanup
  2430. if (size_done >= size_data) {
  2431. // unmap offloaded tensors and metadata
  2432. if (use_mmap && mapping) {
  2433. mapping->unmap_fragment(0, mmap_used_first);
  2434. if (mmap_used_last != 0) {
  2435. mapping->unmap_fragment(mmap_used_last, mapping->size);
  2436. }
  2437. }
  2438. if (progress_callback) {
  2439. // Even though the model is done loading, we still honor
  2440. // cancellation since we need to free allocations.
  2441. return progress_callback(1.0f, progress_callback_user_data);
  2442. }
  2443. }
  2444. return true;
  2445. }
  2446. };
  2447. //
  2448. // load LLaMA models
  2449. //
  2450. static const char * llama_model_arch_name(llm_arch arch) {
  2451. auto it = LLM_ARCH_NAMES.find(arch);
  2452. if (it == LLM_ARCH_NAMES.end()) {
  2453. return "unknown";
  2454. }
  2455. return it->second;
  2456. }
  2457. static std::string llama_model_ftype_name(llama_ftype ftype) {
  2458. if (ftype & LLAMA_FTYPE_GUESSED) {
  2459. return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
  2460. }
  2461. switch (ftype) {
  2462. case LLAMA_FTYPE_ALL_F32: return "all F32";
  2463. case LLAMA_FTYPE_MOSTLY_F16: return "F16";
  2464. case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
  2465. case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
  2466. case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
  2467. return "Q4_1, some F16";
  2468. case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
  2469. case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
  2470. case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
  2471. // K-quants
  2472. case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium";
  2473. case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small";
  2474. case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
  2475. case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
  2476. case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
  2477. case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
  2478. case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
  2479. case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
  2480. case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
  2481. case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
  2482. case LLAMA_FTYPE_MOSTLY_IQ2_XXS:return "IQ2_XXS - 2.0625 bpw";
  2483. case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw";
  2484. case LLAMA_FTYPE_MOSTLY_Q3_K_XS:return "Q3_K - Extra small";
  2485. case LLAMA_FTYPE_MOSTLY_IQ3_XXS:return "IQ3_XXS - 3.0625 bpw";
  2486. default: return "unknown, may not work";
  2487. }
  2488. }
  2489. static const char * llama_model_type_name(e_model type) {
  2490. switch (type) {
  2491. case MODEL_22M: return "22M";
  2492. case MODEL_33M: return "33M";
  2493. case MODEL_109M: return "109M";
  2494. case MODEL_137M: return "137M";
  2495. case MODEL_0_5B: return "0.5B";
  2496. case MODEL_1B: return "1B";
  2497. case MODEL_2B: return "2B";
  2498. case MODEL_3B: return "3B";
  2499. case MODEL_7B: return "7B";
  2500. case MODEL_8B: return "8B";
  2501. case MODEL_13B: return "13B";
  2502. case MODEL_14B: return "14B";
  2503. case MODEL_15B: return "15B";
  2504. case MODEL_20B: return "20B";
  2505. case MODEL_30B: return "30B";
  2506. case MODEL_34B: return "34B";
  2507. case MODEL_40B: return "40B";
  2508. case MODEL_65B: return "65B";
  2509. case MODEL_70B: return "70B";
  2510. case MODEL_SMALL: return "0.1B";
  2511. case MODEL_MEDIUM: return "0.4B";
  2512. case MODEL_LARGE: return "0.8B";
  2513. case MODEL_XL: return "1.5B";
  2514. default: return "?B";
  2515. }
  2516. }
  2517. static const char * llama_model_vocab_type_name(enum llama_vocab_type type){
  2518. switch (type) {
  2519. case LLAMA_VOCAB_TYPE_SPM: return "SPM";
  2520. case LLAMA_VOCAB_TYPE_BPE: return "BPE";
  2521. case LLAMA_VOCAB_TYPE_WPM: return "WPM";
  2522. default: return "unknown";
  2523. }
  2524. }
  2525. static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
  2526. model.arch = ml.get_arch();
  2527. if (model.arch == LLM_ARCH_UNKNOWN) {
  2528. throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
  2529. }
  2530. }
  2531. static void llm_load_hparams(
  2532. llama_model_loader & ml,
  2533. llama_model & model) {
  2534. auto & hparams = model.hparams;
  2535. const gguf_context * ctx = ml.ctx_gguf;
  2536. // get metadata as string
  2537. for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
  2538. enum gguf_type type = gguf_get_kv_type(ctx, i);
  2539. if (type == GGUF_TYPE_ARRAY) {
  2540. continue;
  2541. }
  2542. const char * name = gguf_get_key(ctx, i);
  2543. const std::string value = gguf_kv_to_str(ctx, i);
  2544. model.gguf_kv.emplace(name, value);
  2545. }
  2546. // get general kv
  2547. ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
  2548. // get hparams kv
  2549. ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
  2550. ml.get_key (LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
  2551. ml.get_key (LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
  2552. ml.get_key (LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff);
  2553. ml.get_key (LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head);
  2554. ml.get_key (LLM_KV_BLOCK_COUNT, hparams.n_layer);
  2555. ml.get_key (LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
  2556. ml.get_key (LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
  2557. GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
  2558. GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
  2559. if (hparams.n_expert > 0) {
  2560. GGML_ASSERT(hparams.n_expert_used > 0);
  2561. } else {
  2562. GGML_ASSERT(hparams.n_expert_used == 0);
  2563. }
  2564. // n_head_kv is optional, default to n_head
  2565. hparams.n_head_kv = hparams.n_head;
  2566. ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv, false);
  2567. bool rope_finetuned = false;
  2568. ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
  2569. hparams.rope_finetuned = rope_finetuned;
  2570. hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
  2571. ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_yarn_orig_ctx, false);
  2572. // rope_freq_base (optional)
  2573. hparams.rope_freq_base_train = 10000.0f;
  2574. ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
  2575. std::string rope_scaling("linear");
  2576. ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
  2577. hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
  2578. GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_UNSPECIFIED);
  2579. // rope_freq_scale (inverse of the kv) is optional
  2580. float ropescale = 0.0f;
  2581. if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
  2582. // try the old key name
  2583. ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
  2584. }
  2585. hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
  2586. // sanity check for n_rot (optional)
  2587. {
  2588. hparams.n_rot = hparams.n_embd / hparams.n_head;
  2589. ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
  2590. if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
  2591. if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
  2592. throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
  2593. }
  2594. }
  2595. // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
  2596. // gpt-j n_rot = rotary_dim
  2597. }
  2598. hparams.n_embd_head_k = hparams.n_embd / hparams.n_head;
  2599. ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
  2600. hparams.n_embd_head_v = hparams.n_embd / hparams.n_head;
  2601. ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
  2602. // arch-specific KVs
  2603. switch (model.arch) {
  2604. case LLM_ARCH_LLAMA:
  2605. {
  2606. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2607. switch (hparams.n_layer) {
  2608. case 22: model.type = e_model::MODEL_1B; break;
  2609. case 26: model.type = e_model::MODEL_3B; break;
  2610. case 32: model.type = e_model::MODEL_7B; break;
  2611. case 40: model.type = e_model::MODEL_13B; break;
  2612. case 48: model.type = e_model::MODEL_34B; break;
  2613. case 60: model.type = e_model::MODEL_30B; break;
  2614. case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
  2615. default: model.type = e_model::MODEL_UNKNOWN;
  2616. }
  2617. } break;
  2618. case LLM_ARCH_MINICPM:
  2619. {
  2620. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2621. switch (hparams.n_layer) {
  2622. case 40: model.type = e_model::MODEL_2B; break;
  2623. default: model.type = e_model::MODEL_UNKNOWN;
  2624. }
  2625. } break;
  2626. case LLM_ARCH_FALCON:
  2627. {
  2628. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2629. switch (hparams.n_layer) {
  2630. case 32: model.type = e_model::MODEL_7B; break;
  2631. case 60: model.type = e_model::MODEL_40B; break;
  2632. default: model.type = e_model::MODEL_UNKNOWN;
  2633. }
  2634. } break;
  2635. case LLM_ARCH_BAICHUAN:
  2636. {
  2637. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2638. switch (hparams.n_layer) {
  2639. case 32: model.type = e_model::MODEL_7B; break;
  2640. case 40: model.type = e_model::MODEL_13B; break;
  2641. default: model.type = e_model::MODEL_UNKNOWN;
  2642. }
  2643. } break;
  2644. case LLM_ARCH_STARCODER:
  2645. {
  2646. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2647. switch (hparams.n_layer) {
  2648. case 24: model.type = e_model::MODEL_1B; break;
  2649. case 36: model.type = e_model::MODEL_3B; break;
  2650. case 42: model.type = e_model::MODEL_7B; break;
  2651. case 40: model.type = e_model::MODEL_15B; break;
  2652. default: model.type = e_model::MODEL_UNKNOWN;
  2653. }
  2654. } break;
  2655. case LLM_ARCH_PERSIMMON:
  2656. {
  2657. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2658. switch (hparams.n_layer) {
  2659. case 36: model.type = e_model::MODEL_8B; break;
  2660. default: model.type = e_model::MODEL_UNKNOWN;
  2661. }
  2662. } break;
  2663. case LLM_ARCH_REFACT:
  2664. {
  2665. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2666. switch (hparams.n_layer) {
  2667. case 32: model.type = e_model::MODEL_1B; break;
  2668. default: model.type = e_model::MODEL_UNKNOWN;
  2669. }
  2670. } break;
  2671. case LLM_ARCH_BERT:
  2672. {
  2673. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2674. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  2675. ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
  2676. ml.get_key(LLM_KV_POOLING_LAYER, hparams.pooling_layer);
  2677. switch (hparams.n_layer) {
  2678. case 3:
  2679. model.type = e_model::MODEL_17M; break; // bge-micro
  2680. case 6:
  2681. model.type = e_model::MODEL_22M; break; // MiniLM-L6
  2682. case 12:
  2683. switch (hparams.n_embd) {
  2684. case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small
  2685. case 768: model.type = e_model::MODEL_109M; break; // bge-base
  2686. } break;
  2687. case 24:
  2688. model.type = e_model::MODEL_335M; break; // bge-large
  2689. }
  2690. } break;
  2691. case LLM_ARCH_NOMIC_BERT:
  2692. {
  2693. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2694. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  2695. ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
  2696. ml.get_key(LLM_KV_POOLING_LAYER, hparams.pooling_layer);
  2697. if (hparams.n_layer == 12 && hparams.n_embd == 768) {
  2698. model.type = e_model::MODEL_137M;
  2699. }
  2700. } break;
  2701. case LLM_ARCH_BLOOM:
  2702. {
  2703. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2704. switch (hparams.n_layer) {
  2705. case 24: model.type = e_model::MODEL_1B; break;
  2706. case 30:
  2707. switch (hparams.n_embd) {
  2708. case 2560: model.type = e_model::MODEL_3B; break;
  2709. case 4096: model.type = e_model::MODEL_7B; break;
  2710. } break;
  2711. }
  2712. } break;
  2713. case LLM_ARCH_MPT:
  2714. {
  2715. hparams.f_clamp_kqv = 0.0f;
  2716. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2717. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  2718. ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
  2719. switch (hparams.n_layer) {
  2720. case 32: model.type = e_model::MODEL_7B; break;
  2721. case 48: model.type = e_model::MODEL_30B; break;
  2722. default: model.type = e_model::MODEL_UNKNOWN;
  2723. }
  2724. } break;
  2725. case LLM_ARCH_STABLELM:
  2726. {
  2727. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2728. switch (hparams.n_layer) {
  2729. case 24: model.type = e_model::MODEL_1B; break;
  2730. case 32: model.type = e_model::MODEL_3B; break;
  2731. default: model.type = e_model::MODEL_UNKNOWN;
  2732. }
  2733. } break;
  2734. case LLM_ARCH_QWEN:
  2735. {
  2736. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2737. switch (hparams.n_layer) {
  2738. case 32: model.type = e_model::MODEL_7B; break;
  2739. case 40: model.type = e_model::MODEL_13B; break;
  2740. default: model.type = e_model::MODEL_UNKNOWN;
  2741. }
  2742. } break;
  2743. case LLM_ARCH_QWEN2:
  2744. {
  2745. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2746. switch (hparams.n_layer) {
  2747. case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break;
  2748. case 32: model.type = e_model::MODEL_7B; break;
  2749. case 40: model.type = hparams.n_head == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break;
  2750. case 80: model.type = e_model::MODEL_70B; break;
  2751. default: model.type = e_model::MODEL_UNKNOWN;
  2752. }
  2753. } break;
  2754. case LLM_ARCH_PHI2:
  2755. {
  2756. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2757. switch (hparams.n_layer) {
  2758. case 24: model.type = e_model::MODEL_1B; break;
  2759. case 32: model.type = e_model::MODEL_3B; break;
  2760. default: model.type = e_model::MODEL_UNKNOWN;
  2761. }
  2762. } break;
  2763. case LLM_ARCH_PLAMO:
  2764. {
  2765. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2766. switch (hparams.n_layer) {
  2767. case 40: model.type = e_model::MODEL_13B; break;
  2768. default: model.type = e_model::MODEL_UNKNOWN;
  2769. }
  2770. } break;
  2771. case LLM_ARCH_GPT2:
  2772. {
  2773. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2774. switch (hparams.n_layer) {
  2775. case 12: model.type = e_model::MODEL_SMALL; break;
  2776. case 24: model.type = e_model::MODEL_MEDIUM; break;
  2777. case 36: model.type = e_model::MODEL_LARGE; break;
  2778. case 48: model.type = e_model::MODEL_XL; break;
  2779. default: model.type = e_model::MODEL_UNKNOWN;
  2780. }
  2781. } break;
  2782. case LLM_ARCH_CODESHELL:
  2783. {
  2784. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2785. switch (hparams.n_layer) {
  2786. case 42: model.type = e_model::MODEL_SMALL; break;
  2787. default: model.type = e_model::MODEL_UNKNOWN;
  2788. }
  2789. } break;
  2790. case LLM_ARCH_ORION:
  2791. {
  2792. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2793. switch (hparams.n_layer) {
  2794. case 40: model.type = e_model::MODEL_14B; break;
  2795. default: model.type = e_model::MODEL_UNKNOWN;
  2796. }
  2797. } break;
  2798. case LLM_ARCH_INTERNLM2:
  2799. {
  2800. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2801. switch (hparams.n_layer) {
  2802. case 32: model.type = e_model::MODEL_7B; break;
  2803. case 48: model.type = e_model::MODEL_20B; break;
  2804. default: model.type = e_model::MODEL_UNKNOWN;
  2805. }
  2806. } break;
  2807. default: (void)0;
  2808. }
  2809. model.ftype = ml.ftype;
  2810. }
  2811. // TODO: This should probably be in llama.h
  2812. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special = false);
  2813. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);
  2814. static void llm_load_vocab(
  2815. llama_model_loader & ml,
  2816. llama_model & model) {
  2817. auto & vocab = model.vocab;
  2818. struct gguf_context * ctx = ml.ctx_gguf;
  2819. const auto kv = LLM_KV(model.arch);
  2820. const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
  2821. if (token_idx == -1) {
  2822. throw std::runtime_error("cannot find tokenizer vocab in model file\n");
  2823. }
  2824. const float * scores = nullptr;
  2825. const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
  2826. if (score_idx != -1) {
  2827. scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
  2828. }
  2829. const int * toktypes = nullptr;
  2830. const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
  2831. if (toktype_idx != -1) {
  2832. toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
  2833. }
  2834. // determine vocab type
  2835. {
  2836. std::string tokenizer_name;
  2837. ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_name);
  2838. if (tokenizer_name == "llama") {
  2839. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  2840. // default special tokens
  2841. vocab.special_bos_id = 1;
  2842. vocab.special_eos_id = 2;
  2843. vocab.special_unk_id = 0;
  2844. vocab.special_sep_id = -1;
  2845. vocab.special_pad_id = -1;
  2846. const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
  2847. if (add_space_prefix_keyidx != -1) {
  2848. vocab.add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
  2849. } // The default value of add_space_prefix is true.
  2850. } else if (tokenizer_name == "gpt2") {
  2851. vocab.type = LLAMA_VOCAB_TYPE_BPE;
  2852. // read bpe merges and populate bpe ranks
  2853. const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
  2854. if (merges_keyidx == -1) {
  2855. throw std::runtime_error("cannot find tokenizer merges in model file\n");
  2856. }
  2857. const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
  2858. for (int i = 0; i < n_merges; i++) {
  2859. const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
  2860. GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
  2861. std::string first;
  2862. std::string second;
  2863. const size_t pos = word.find(' ', 1);
  2864. if (pos != std::string::npos) {
  2865. first = word.substr(0, pos);
  2866. second = word.substr(pos + 1);
  2867. }
  2868. vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
  2869. }
  2870. // default special tokens
  2871. vocab.special_bos_id = 11;
  2872. vocab.special_eos_id = 11;
  2873. vocab.special_unk_id = -1;
  2874. vocab.special_sep_id = -1;
  2875. vocab.special_pad_id = -1;
  2876. } else if (tokenizer_name == "bert") {
  2877. vocab.type = LLAMA_VOCAB_TYPE_WPM;
  2878. // default special tokens
  2879. vocab.special_bos_id = 101;
  2880. vocab.special_eos_id = 102;
  2881. vocab.special_unk_id = 100;
  2882. vocab.special_sep_id = -1;
  2883. vocab.special_pad_id = -1;
  2884. vocab.add_space_prefix = false;
  2885. } else {
  2886. LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
  2887. LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
  2888. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  2889. }
  2890. }
  2891. const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
  2892. vocab.id_to_token.resize(n_vocab);
  2893. for (uint32_t i = 0; i < n_vocab; i++) {
  2894. std::string word = gguf_get_arr_str(ctx, token_idx, i);
  2895. GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
  2896. vocab.token_to_id[word] = i;
  2897. auto & token_data = vocab.id_to_token[i];
  2898. token_data.text = std::move(word);
  2899. token_data.score = scores ? scores[i] : 0.0f;
  2900. token_data.type = toktypes ? (llama_token_type) toktypes[i] : LLAMA_TOKEN_TYPE_NORMAL;
  2901. }
  2902. GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
  2903. // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
  2904. if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
  2905. try {
  2906. vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
  2907. } catch (const std::exception & e) {
  2908. LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
  2909. vocab.linefeed_id = vocab.special_pad_id;
  2910. }
  2911. } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
  2912. vocab.linefeed_id = vocab.special_pad_id;
  2913. } else {
  2914. const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
  2915. GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
  2916. vocab.linefeed_id = ids[0];
  2917. }
  2918. // special tokens
  2919. {
  2920. const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
  2921. { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
  2922. { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
  2923. { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
  2924. { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
  2925. { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
  2926. };
  2927. for (const auto & it : special_token_types) {
  2928. const std::string & key = kv(std::get<0>(it));
  2929. int32_t & id = std::get<1>(it);
  2930. uint32_t new_id;
  2931. if (!ml.get_key(std::get<0>(it), new_id, false)) {
  2932. continue;
  2933. }
  2934. if (new_id >= vocab.id_to_token.size()) {
  2935. LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
  2936. __func__, key.c_str(), new_id, id);
  2937. } else {
  2938. id = new_id;
  2939. }
  2940. }
  2941. // Handle add_bos_token and add_eos_token
  2942. {
  2943. bool temp = true;
  2944. if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
  2945. vocab.special_add_bos = int(temp);
  2946. }
  2947. if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
  2948. vocab.special_add_eos = int(temp);
  2949. }
  2950. }
  2951. }
  2952. // build special tokens cache
  2953. {
2954. // TODO: It is unclear (to me) at this point whether special tokens are guaranteed to be of a deterministic type,
2955. // and whether they will always be correctly labeled in 'added_tokens.json' etc.
2956. // The assumption is: since special tokens aren't meant to be exposed to the end user, they are designed
2957. // to be unmatchable by the tokenizer, so any token in the vocab that the tokenizer cannot produce
2958. // from plain text is treated as a special token.
2959. // From testing, this appears to correlate 1:1 with special tokens.
2960. //
2961. // Counting special tokens and verifying in only one direction
2962. // is sufficient to detect a difference between those two sets.
  2963. //
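// Illustrative example (hypothetical tokens): a marker like "<|assistant|>" cannot be split at any
// position into two halves that are both in the vocab, so it lands in special_tokens_cache, whereas
// an ordinary string such as "ings" can be split into in-vocab pieces ("ing" + "s") and is skipped.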
  2964. uint32_t special_tokens_count_by_type = 0;
  2965. uint32_t special_tokens_count_from_verification = 0;
  2966. bool special_tokens_definition_mismatch = false;
  2967. for (const auto & t : vocab.token_to_id) {
  2968. const auto & token = t.first;
  2969. const auto & id = t.second;
  2970. // Count all non-normal tokens in the vocab while iterating
  2971. if (vocab.id_to_token[id].type != LLAMA_TOKEN_TYPE_NORMAL) {
  2972. special_tokens_count_by_type++;
  2973. }
  2974. // Skip single character tokens
  2975. if (token.length() > 1) {
  2976. bool is_tokenizable = false;
  2977. // Split token string representation in two, in all possible ways
  2978. // and check if both halves can be matched to a valid token
  2979. for (unsigned i = 1; i < token.length();) {
  2980. const auto left = token.substr(0, i);
  2981. const auto right = token.substr(i);
2982. // check that we didn't split in the middle of a UTF-8 sequence
  2983. auto utf = utf8_len(left.at(left.length() - 1));
  2984. if (utf == 1) {
  2985. if (vocab.token_to_id.find(left) != vocab.token_to_id.end() &&
  2986. vocab.token_to_id.find(right) != vocab.token_to_id.end() ) {
  2987. is_tokenizable = true;
  2988. break;
  2989. }
  2990. i++;
  2991. } else {
2992. // skip over the rest of the multi-byte UTF-8 sequence
  2993. i += utf - 1;
  2994. }
  2995. }
  2996. if (!is_tokenizable) {
2997. // Some tokens are multi-byte, but their UTF-8 sequences encode only a single character,
2998. // so it's cheaper to re-filter them here, where there are far fewer candidates left.
2999. // Compute the total length of the token's string representation in UTF-8 characters:
  3000. size_t utf8_str_len = 0;
  3001. for (unsigned i = 0; i < token.length();) {
  3002. utf8_str_len++;
  3003. i += utf8_len(token.at(i));
  3004. }
3005. // and skip the tokens that are only one character long
  3006. if (utf8_str_len > 1) {
  3007. // At this point what we have left are special tokens only
  3008. vocab.special_tokens_cache[token] = id;
  3009. // Count manually found special tokens
  3010. special_tokens_count_from_verification++;
  3011. // If this manually found special token is not marked as such, flag a mismatch
  3012. if (vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL) {
  3013. special_tokens_definition_mismatch = true;
  3014. }
  3015. }
  3016. }
  3017. }
  3018. }
  3019. if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
  3020. LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
  3021. __func__,
  3022. special_tokens_count_from_verification, vocab.id_to_token.size(),
  3023. special_tokens_count_by_type, vocab.id_to_token.size()
  3024. );
  3025. } else {
  3026. LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
  3027. __func__,
  3028. special_tokens_count_from_verification, vocab.id_to_token.size()
  3029. );
  3030. }
  3031. }
  3032. }
  3033. static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
  3034. const auto & hparams = model.hparams;
  3035. const auto & vocab = model.vocab;
  3036. const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
  3037. // hparams
  3038. LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
  3039. LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch));
  3040. LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, llama_model_vocab_type_name(vocab.type));
  3041. LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
3042. LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (uint32_t) vocab.bpe_ranks.size());
  3043. LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
  3044. LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
  3045. LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
  3046. LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
  3047. LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
  3048. LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot);
  3049. LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k);
  3050. LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v);
  3051. LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
  3052. LLAMA_LOG_INFO("%s: n_embd_k_gqa = %u\n", __func__, hparams.n_embd_k_gqa());
  3053. LLAMA_LOG_INFO("%s: n_embd_v_gqa = %u\n", __func__, hparams.n_embd_v_gqa());
  3054. LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
  3055. LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
  3056. LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
  3057. LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
  3058. LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
  3059. LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
  3060. LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
  3061. LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type);
  3062. LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
  3063. LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
  3064. LLAMA_LOG_INFO("%s: n_yarn_orig_ctx = %u\n", __func__, hparams.n_yarn_orig_ctx);
  3065. LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
  3066. LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
  3067. LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
  3068. if (ml.n_elements >= 1e12) {
  3069. LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, ml.n_elements*1e-12);
  3070. } else if (ml.n_elements >= 1e9) {
  3071. LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
  3072. } else if (ml.n_elements >= 1e6) {
  3073. LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, ml.n_elements*1e-6);
  3074. } else {
  3075. LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
  3076. }
  3077. if (ml.n_bytes < GiB) {
  3078. LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  3079. } else {
  3080. LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  3081. }
  3082. // general kv
  3083. LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
  3084. // special tokens
  3085. if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
  3086. if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
  3087. if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
  3088. if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
  3089. if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
  3090. if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
  3091. }
  3092. // Returns false if cancelled by progress_callback
  3093. static bool llm_load_tensors(
  3094. llama_model_loader & ml,
  3095. llama_model & model,
  3096. int n_gpu_layers,
  3097. enum llama_split_mode split_mode,
  3098. int main_gpu,
  3099. const float * tensor_split,
  3100. bool use_mlock,
  3101. llama_progress_callback progress_callback,
  3102. void * progress_callback_user_data) {
  3103. model.t_start_us = ggml_time_us();
  3104. auto & hparams = model.hparams;
  3105. model.split_mode = split_mode;
  3106. model.main_gpu = main_gpu;
  3107. model.n_gpu_layers = n_gpu_layers;
  3108. const int64_t n_layer = hparams.n_layer;
  3109. const int64_t i_gpu_start = std::max((int64_t) hparams.n_layer - n_gpu_layers, (int64_t) 0);
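// e.g. with n_layer = 32 and n_gpu_layers = 20 (hypothetical values), i_gpu_start = 12:
// layers 0..11 stay on the CPU and layers 12..31 are assigned GPU buffer types below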
  3110. // there is very little benefit to offloading the input layer, so always keep it on the CPU
  3111. model.buft_input = llama_default_buffer_type_cpu(true);
  3112. model.buft_layer.resize(n_layer);
  3113. // assign cpu layers
  3114. for (int64_t i = 0; i < i_gpu_start; ++i) {
  3115. model.buft_layer[i] = llama_default_buffer_type_cpu(true);
  3116. }
  3117. if (split_mode == LLAMA_SPLIT_LAYER) {
  3118. // calculate the split points
  3119. int device_count = llama_get_device_count();
  3120. bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; });
  3121. std::vector<float> splits(device_count);
  3122. if (all_zero) {
  3123. // default split, by free memory
  3124. for (int i = 0; i < device_count; ++i) {
  3125. splits[i] = llama_get_device_memory(i);
  3126. }
  3127. } else {
  3128. std::copy(tensor_split, tensor_split + device_count, splits.begin());
  3129. }
  3130. // sum and normalize the splits to get the split points
  3131. float split_sum = 0.0f;
  3132. for (int i = 0; i < device_count; ++i) {
  3133. split_sum += splits[i];
  3134. splits[i] = split_sum;
  3135. }
  3136. for (int i = 0; i < device_count; ++i) {
  3137. splits[i] /= split_sum;
  3138. }
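// worked example (hypothetical values): free memory of [8, 8, 16] GiB gives prefix sums
// [8, 16, 32] and normalized split points [0.25, 0.5, 1.0]; the upper_bound lookup below then
// sends a layer whose fraction i/act_gpu_layers is < 0.25 to device 0, < 0.5 to device 1,
// and the rest to device 2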
  3139. // assign the repeating layers to the devices according to the splits
  3140. int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1);
  3141. for (int64_t i = i_gpu_start; i < n_layer; ++i) {
  3142. int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits.begin();
  3143. model.buft_layer[i] = llama_default_buffer_type_offload(layer_gpu);
  3144. }
  3145. // assign the output layer
  3146. if (n_gpu_layers > n_layer) {
  3147. int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits.begin();
  3148. model.buft_output = llama_default_buffer_type_offload(layer_gpu);
  3149. } else {
  3150. model.buft_output = llama_default_buffer_type_cpu(true);
  3151. }
  3152. } else {
  3153. ggml_backend_buffer_type_t split_buft;
  3154. if (split_mode == LLAMA_SPLIT_ROW) {
  3155. split_buft = llama_default_buffer_type_split(main_gpu, tensor_split);
  3156. } else {
  3157. // LLAMA_SPLIT_NONE or LLAMA_SPLIT_LAYER in backends where it is not supported
  3158. split_buft = llama_default_buffer_type_offload(main_gpu);
  3159. }
  3160. // assign the repeating layers
  3161. for (int64_t i = i_gpu_start; i < n_layer; ++i) {
  3162. model.buft_layer[i] = {
  3163. split_buft,
  3164. llama_default_buffer_type_offload(main_gpu)
  3165. };
  3166. }
  3167. // assign the output layer
  3168. if (n_gpu_layers > n_layer) {
  3169. model.buft_output = {
  3170. split_buft,
  3171. llama_default_buffer_type_offload(main_gpu)
  3172. };
  3173. } else {
  3174. model.buft_output = llama_default_buffer_type_cpu(true);
  3175. }
  3176. }
  3177. // count used buffer types
  3178. std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
  3179. buft_layer_count[model.buft_input.buft]++;
  3180. buft_layer_count[model.buft_input.buft_matrix]++;
  3181. buft_layer_count[model.buft_output.buft]++;
  3182. buft_layer_count[model.buft_output.buft_matrix]++;
  3183. for (int64_t i = 0; i < n_layer; ++i) {
  3184. buft_layer_count[model.buft_layer[i].buft]++;
  3185. buft_layer_count[model.buft_layer[i].buft_matrix]++;
  3186. }
  3187. // create one context per buffer type
  3188. size_t ctx_size = ggml_tensor_overhead()*ml.n_tensors;
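// the per-buffer-type contexts below are created with no_alloc = true, so each one only holds
// tensor metadata (ggml_tensor_overhead() bytes per tensor); the weight data itself is placed
// into backend buffers later in this function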
  3189. std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
  3190. for (auto & it : buft_layer_count) {
  3191. struct ggml_init_params params = {
  3192. /*.mem_size =*/ ctx_size,
  3193. /*.mem_buffer =*/ NULL,
  3194. /*.no_alloc =*/ true,
  3195. };
  3196. ggml_context * ctx = ggml_init(params);
  3197. if (!ctx) {
  3198. throw std::runtime_error(format("failed to create context"));
  3199. }
  3200. ctx_map[it.first] = ctx;
  3201. model.ctxs.push_back(ctx);
  3202. }
  3203. LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
  3204. // create tensors for the weights
  3205. {
  3206. const int64_t n_embd = hparams.n_embd;
  3207. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  3208. const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  3209. const int64_t n_embd_gqa = n_embd_v_gqa;
  3210. const int64_t n_vocab = hparams.n_vocab;
  3211. const int64_t n_vocab_type = hparams.n_vocab_type;
  3212. const int64_t n_ff = hparams.n_ff;
  3213. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
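// sketch of the GQA dimensions (hypothetical values): with n_head = 32, n_head_kv = 8 and a head
// size of 128, n_embd = 32*128 = 4096 while n_embd_k_gqa = n_embd_v_gqa = 8*128 = 1024, so the
// K/V projections created below are narrower than the Q projection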
  3214. ggml_context * ctx_input = ctx_map.at(model.buft_input.buft);
  3215. ggml_context * ctx_output = ctx_map.at(model.buft_output.buft);
  3216. ggml_context * ctx_output_split = ctx_map.at(model.buft_output.buft_matrix);
  3217. auto ctx_for_layer = [&](int i) { return ctx_map.at(model.buft_layer[i].buft); };
  3218. auto ctx_for_layer_split = [&](int i) { return ctx_map.at(model.buft_layer[i].buft_matrix); };
  3219. model.layers.resize(n_layer);
  3220. const auto tn = LLM_TN(model.arch);
  3221. switch (model.arch) {
  3222. case LLM_ARCH_LLAMA:
  3223. case LLM_ARCH_REFACT:
  3224. case LLM_ARCH_MINICPM:
  3225. {
  3226. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3227. // output
  3228. {
  3229. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3230. if (model.arch != LLM_ARCH_MINICPM){
  3231. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3232. }
  3233. }
  3234. for (int i = 0; i < n_layer; ++i) {
  3235. ggml_context * ctx_layer = ctx_for_layer(i);
  3236. ggml_context * ctx_split = ctx_for_layer_split(i);
  3237. auto & layer = model.layers[i];
  3238. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3239. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3240. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3241. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3242. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3243. // optional bias tensors
  3244. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, false);
  3245. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, false);
  3246. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, false);
  3247. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, false);
  3248. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3249. layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd}, false);
  3250. if (layer.ffn_gate_inp == nullptr) {
  3251. GGML_ASSERT(hparams.n_expert == 0);
  3252. GGML_ASSERT(hparams.n_expert_used == 0);
  3253. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  3254. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3255. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3256. } else {
  3257. GGML_ASSERT(hparams.n_expert > 0);
  3258. GGML_ASSERT(hparams.n_expert_used > 0);
  3259. // MoE branch
  3260. for (uint32_t x = 0; x < hparams.n_expert; ++x) {
  3261. layer.ffn_gate_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), {n_embd, n_ff});
  3262. layer.ffn_down_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd});
  3263. layer.ffn_up_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), {n_embd, n_ff});
  3264. }
  3265. }
  3266. }
  3267. } break;
  3268. case LLM_ARCH_BAICHUAN:
  3269. {
  3270. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3271. {
  3272. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3273. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3274. }
  3275. for (int i = 0; i < n_layer; ++i) {
  3276. ggml_context * ctx_layer = ctx_for_layer(i);
  3277. ggml_context * ctx_split = ctx_for_layer_split(i);
  3278. auto & layer = model.layers[i];
  3279. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3280. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3281. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3282. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3283. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3284. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3285. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  3286. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3287. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3288. }
  3289. } break;
  3290. case LLM_ARCH_FALCON:
  3291. {
  3292. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3293. // output
  3294. {
  3295. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3296. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3297. if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_OUTPUT, "weight").c_str()) >= 0) {
  3298. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3299. } else {
  3300. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // needs to be on GPU
  3301. ml.n_created--; // artificial tensor
  3302. }
  3303. }
  3304. for (int i = 0; i < n_layer; ++i) {
  3305. ggml_context * ctx_layer = ctx_for_layer(i);
  3306. ggml_context * ctx_split = ctx_for_layer_split(i);
  3307. auto & layer = model.layers[i];
  3308. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3309. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3310. if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) {
  3311. layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd});
  3312. layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd});
  3313. }
  3314. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
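// the fused QKV projection packs Q (n_embd columns) with K and V (n_embd_gqa columns each),
// hence the n_embd + 2*n_embd_gqa output width; with the hypothetical numbers above that would
// be 4096 + 2*1024 = 6144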
  3315. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3316. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3317. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3318. }
  3319. } break;
  3320. case LLM_ARCH_STARCODER:
  3321. {
  3322. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3323. model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
  3324. // output
  3325. {
  3326. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3327. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3328. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3329. }
  3330. for (int i = 0; i < n_layer; ++i) {
  3331. ggml_context * ctx_layer = ctx_for_layer(i);
  3332. ggml_context * ctx_split = ctx_for_layer_split(i);
  3333. auto & layer = model.layers[i];
  3334. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3335. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3336. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  3337. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  3338. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3339. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  3340. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3341. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  3342. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  3343. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  3344. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3345. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  3346. }
  3347. } break;
  3348. case LLM_ARCH_PERSIMMON:
  3349. {
  3350. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3351. {
  3352. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3353. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3354. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3355. }
  3356. for (int i = 0; i < n_layer; ++i) {
  3357. ggml_context * ctx_layer = ctx_for_layer(i);
  3358. ggml_context * ctx_split = ctx_for_layer_split(i);
  3359. auto & layer = model.layers[i];
  3360. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3361. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3362. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  3363. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  3364. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3365. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  3366. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  3367. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  3368. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3369. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  3370. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3371. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  3372. layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64});
  3373. layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64});
  3374. layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64});
  3375. layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64});
  3376. }
  3377. } break;
  3378. case LLM_ARCH_BERT:
  3379. case LLM_ARCH_NOMIC_BERT:
  3380. {
  3381. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3382. model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type});
  3383. if (model.arch == LLM_ARCH_BERT) {
  3384. model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
  3385. }
  3386. model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
  3387. model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});
  3388. for (int i = 0; i < n_layer; ++i) {
  3389. ggml_context * ctx_layer = ctx_for_layer(i);
  3390. ggml_context * ctx_split = ctx_for_layer_split(i);
  3391. auto & layer = model.layers[i];
  3392. if (model.arch == LLM_ARCH_BERT) {
  3393. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3394. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  3395. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3396. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  3397. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3398. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
  3399. } else {
  3400. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  3401. }
  3402. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3403. layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
  3404. layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd});
  3405. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3406. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  3407. if (model.arch == LLM_ARCH_BERT) {
  3408. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  3409. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  3410. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  3411. } else {
  3412. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  3413. }
  3414. layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
  3415. layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd});
  3416. }
  3417. } break;
  3418. case LLM_ARCH_BLOOM:
  3419. {
  3420. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3421. model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
  3422. model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});
  3423. // output
  3424. {
  3425. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3426. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3427. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3428. }
  3429. for (int i = 0; i < n_layer; ++i) {
  3430. ggml_context * ctx_layer = ctx_for_layer(i);
  3431. ggml_context * ctx_split = ctx_for_layer_split(i);
  3432. auto & layer = model.layers[i];
  3433. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3434. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3435. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  3436. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  3437. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3438. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  3439. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3440. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  3441. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  3442. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  3443. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3444. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  3445. }
  3446. } break;
  3447. case LLM_ARCH_MPT:
  3448. {
  3449. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3450. // output
  3451. {
  3452. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3453. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3454. }
  3455. for (int i = 0; i < n_layer; ++i) {
  3456. ggml_context * ctx_layer = ctx_for_layer(i);
  3457. ggml_context * ctx_split = ctx_for_layer_split(i);
  3458. auto & layer = model.layers[i];
  3459. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3460. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  3461. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3462. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3463. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3464. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3465. // AWQ ScaleActivation layer
  3466. layer.ffn_act = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, false);
  3467. }
  3468. } break;
  3469. case LLM_ARCH_STABLELM:
  3470. {
  3471. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3472. // output
  3473. {
  3474. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3475. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3476. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3477. }
  3478. for (int i = 0; i < n_layer; ++i) {
  3479. ggml_context * ctx_layer = ctx_for_layer(i);
  3480. ggml_context * ctx_split = ctx_for_layer_split(i);
  3481. auto & layer = model.layers[i];
  3482. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3483. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3484. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3485. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3486. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3487. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3488. // optional bias tensors, present in Stable LM 2 1.6B
  3489. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, false);
  3490. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, false);
  3491. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, false);
  3492. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3493. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  3494. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  3495. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3496. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3497. }
  3498. } break;
  3499. case LLM_ARCH_QWEN:
  3500. {
  3501. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3502. // output
  3503. {
  3504. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3505. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3506. }
  3507. for (int i = 0; i < n_layer; ++i) {
  3508. ggml_context * ctx_layer = ctx_for_layer(i);
  3509. ggml_context * ctx_split = ctx_for_layer_split(i);
  3510. auto & layer = model.layers[i];
  3511. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3512. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3});
  3513. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3});
  3514. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3515. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3516. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2});
  3517. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd});
  3518. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2});
  3519. }
  3520. } break;
  3521. case LLM_ARCH_QWEN2:
  3522. {
  3523. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3524. // output
  3525. {
  3526. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3527. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3528. }
  3529. for (int i = 0; i < n_layer; ++i) {
  3530. ggml_context * ctx_layer = ctx_for_layer(i);
  3531. ggml_context * ctx_split = ctx_for_layer_split(i);
  3532. auto & layer = model.layers[i];
  3533. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3534. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3535. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3536. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3537. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
3538. // bias tensors
  3539. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  3540. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  3541. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
  3542. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3543. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  3544. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3545. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3546. }
  3547. } break;
  3548. case LLM_ARCH_PHI2:
  3549. {
  3550. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3551. // output
  3552. {
  3553. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3554. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3555. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3556. model.output_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab});
  3557. }
  3558. for (int i = 0; i < n_layer; ++i) {
  3559. ggml_context * ctx_layer = ctx_for_layer(i);
  3560. ggml_context * ctx_split = ctx_for_layer_split(i);
  3561. auto & layer = model.layers[i];
  3562. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3563. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3564. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, false);
  3565. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, false);
  3566. if (layer.wqkv == nullptr) {
  3567. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3568. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  3569. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3570. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  3571. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3572. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
  3573. }
  3574. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3575. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  3576. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  3577. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  3578. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3579. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  3580. }
  3581. } break;
  3582. case LLM_ARCH_PLAMO:
  3583. {
  3584. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3585. // output
  3586. {
  3587. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3588. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3589. }
  3590. for (int i = 0; i < n_layer; ++i) {
  3591. ggml_context * ctx_layer = ctx_for_layer(i);
  3592. ggml_context * ctx_split = ctx_for_layer_split(i);
  3593. auto & layer = model.layers[i];
  3594. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3595. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3596. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3597. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3598. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3599. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  3600. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3601. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3602. }
  3603. } break;
  3604. case LLM_ARCH_GPT2:
  3605. {
  3606. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3607. model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
  3608. // output
  3609. {
  3610. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3611. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3612. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3613. }
  3614. for (int i = 0; i < n_layer; ++i) {
  3615. ggml_context * ctx_layer = ctx_for_layer(i);
  3616. ggml_context * ctx_split = ctx_for_layer_split(i);
  3617. auto & layer = model.layers[i];
  3618. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3619. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3620. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  3621. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  3622. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3623. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  3624. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3625. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  3626. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  3627. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  3628. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3629. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  3630. }
  3631. } break;
  3632. case LLM_ARCH_CODESHELL:
  3633. {
  3634. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3635. // output
  3636. {
  3637. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3638. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3639. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3640. }
  3641. for (int i = 0; i < n_layer; ++i) {
  3642. ggml_context * ctx_layer = ctx_for_layer(i);
  3643. ggml_context * ctx_split = ctx_for_layer_split(i);
  3644. auto & layer = model.layers[i];
  3645. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3646. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3647. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  3648. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  3649. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3650. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  3651. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3652. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  3653. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  3654. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  3655. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3656. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  3657. }
  3658. } break;
  3659. case LLM_ARCH_ORION:
  3660. {
  3661. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3662. {
  3663. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3664. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  3665. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3666. }
  3667. for (int i = 0; i < n_layer; ++i) {
  3668. ggml_context * ctx_layer = ctx_for_layer(i);
  3669. ggml_context * ctx_split = ctx_for_layer_split(i);
  3670. auto & layer = model.layers[i];
  3671. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3672. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  3673. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3674. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3675. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3676. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3677. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3678. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  3679. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  3680. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3681. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3682. }
  3683. } break;
  3684. case LLM_ARCH_INTERNLM2:
  3685. {
  3686. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  3687. // output
  3688. {
  3689. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  3690. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  3691. }
  3692. for (int i = 0; i < n_layer; ++i) {
  3693. ggml_context * ctx_layer = ctx_for_layer(i);
  3694. ggml_context * ctx_split = ctx_for_layer_split(i);
  3695. auto & layer = model.layers[i];
  3696. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  3697. // layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  3698. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  3699. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  3700. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  3701. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  3702. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  3703. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  3704. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  3705. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  3706. }
  3707. } break;
  3708. default:
  3709. throw std::runtime_error("unknown architecture");
  3710. }
  3711. }
  3712. ml.done_getting_tensors();
  3713. ml.init_mapping(true, use_mlock ? &model.mlock_mmap : nullptr);
  3714. // create the backend buffers
  3715. std::vector<std::pair<ggml_context *, ggml_backend_buffer_t>> ctx_bufs;
  3716. for (auto & it : ctx_map) {
  3717. ggml_backend_buffer_type_t buft = it.first;
  3718. ggml_context * ctx = it.second;
  3719. ggml_backend_buffer_t buf = nullptr;
  3720. // only the mmap region containing the tensors in the model is mapped to the backend buffer
  3721. // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
  3722. // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
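// ml.get_mapping_range() reports the byte range of this context's tensors within the mapped file,
// so the CPU buffer below can wrap that region directly without copying the weights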
  3723. if (ml.use_mmap && buft == llama_default_buffer_type_cpu(true)) {
  3724. size_t first, last;
  3725. ml.get_mapping_range(&first, &last, ctx);
  3726. buf = ggml_backend_cpu_buffer_from_ptr((char *) ml.mapping->addr + first, last - first);
  3727. }
  3728. #ifdef GGML_USE_METAL
  3729. else if (ml.use_mmap && buft == ggml_backend_metal_buffer_type()) {
  3730. const size_t max_size = ggml_get_max_tensor_size(ctx);
  3731. size_t first, last;
  3732. ml.get_mapping_range(&first, &last, ctx);
  3733. buf = ggml_backend_metal_buffer_from_ptr((char *) ml.mapping->addr + first, last - first, max_size);
  3734. }
  3735. #endif
  3736. else {
  3737. buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
  3738. if (buf != nullptr && use_mlock && ggml_backend_buffer_is_host(buf)) {
  3739. model.mlock_bufs.emplace_back(new llama_mlock);
  3740. auto & mlock_buf = model.mlock_bufs.back();
  3741. mlock_buf->init (ggml_backend_buffer_get_base(buf));
  3742. mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
  3743. }
  3744. }
  3745. if (buf == nullptr) {
  3746. throw std::runtime_error("failed to allocate buffer");
  3747. }
  3748. // indicate that this buffer contains weights
  3749. // this is used by ggml_backend_sched to improve op scheduling -> ops that use a weight are preferably scheduled to the backend that contains the weight
  3750. ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
  3751. model.bufs.push_back(buf);
  3752. ctx_bufs.emplace_back(ctx, buf);
  3753. }
  3754. if (llama_supports_gpu_offload()) {
  3755. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  3756. LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  3757. if (n_gpu_layers > (int) hparams.n_layer) {
  3758. LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
  3759. }
  3760. const int max_backend_supported_layers = hparams.n_layer + 1;
  3761. const int max_offloadable_layers = hparams.n_layer + 1;
  3762. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
  3763. }
  3764. // print memory requirements
  3765. for (ggml_backend_buffer_t buf : model.bufs) {
  3766. LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
  3767. }
  3768. // populate tensors_by_name
  3769. for (ggml_context * ctx : model.ctxs) {
  3770. for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
  3771. model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
  3772. }
  3773. }
  3774. // load tensor data
  3775. for (auto & it : ctx_bufs) {
  3776. ggml_context * ctx = it.first;
  3777. ggml_backend_buffer_t buf = it.second;
  3778. if (!ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf, use_mlock ? &model.mlock_mmap : NULL)) {
  3779. return false;
  3780. }
  3781. }
  3782. model.mapping = std::move(ml.mapping);
3783. // the loading time will be recalculated after the first eval, so
3784. // that page faults deferred by mmap() are taken into account
  3785. model.t_load_us = ggml_time_us() - model.t_start_us;
  3786. return true;
  3787. }
  3788. // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
  3789. static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
  3790. try {
  3791. llama_model_loader ml(fname, params.use_mmap, params.kv_overrides);
  3792. model.hparams.vocab_only = params.vocab_only;
  3793. try {
  3794. llm_load_arch(ml, model);
  3795. } catch(const std::exception & e) {
  3796. throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
  3797. }
  3798. try {
  3799. llm_load_hparams(ml, model);
  3800. } catch(const std::exception & e) {
  3801. throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
  3802. }
  3803. try {
  3804. llm_load_vocab(ml, model);
  3805. } catch(const std::exception & e) {
  3806. throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
  3807. }
  3808. llm_load_print_meta(ml, model);
  3809. if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
  3810. throw std::runtime_error("vocab size mismatch");
  3811. }
  3812. if (params.vocab_only) {
  3813. LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
  3814. return 0;
  3815. }
  3816. #ifdef GGML_USE_KOMPUTE
  3817. if (params.n_gpu_layers > 0 && (
  3818. !(model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON)
  3819. || !(
  3820. model.ftype == LLAMA_FTYPE_ALL_F32 ||
  3821. model.ftype == LLAMA_FTYPE_MOSTLY_F16 ||
  3822. model.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
  3823. model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1
  3824. )
  3825. )) {
  3826. // TODO(cebtenzzre): propagate this error outside of llama_load_model_from_file
  3827. LLAMA_LOG_WARN("%s: disabling Kompute due to unsupported model arch or quantization\n", __func__);
  3828. params.n_gpu_layers = 0;
  3829. }
  3830. #endif
  3831. if (!llm_load_tensors(
  3832. ml, model, params.n_gpu_layers, params.split_mode, params.main_gpu, params.tensor_split, params.use_mlock,
  3833. params.progress_callback, params.progress_callback_user_data
  3834. )) {
  3835. return -2;
  3836. }
  3837. } catch (const std::exception & err) {
  3838. LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
  3839. return -1;
  3840. }
  3841. return 0;
  3842. }
  3843. //
  3844. // llm_build
  3845. //
  3846. using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;
  3847. enum llm_rope_type {
  3848. LLM_ROPE,
  3849. LLM_ROPE_NEOX,
  3850. LLM_ROPE_GLM,
  3851. };
  3852. enum llm_ffn_op_type {
  3853. LLM_FFN_SILU,
  3854. LLM_FFN_GELU,
  3855. LLM_FFN_RELU,
  3856. LLM_FFN_RELU_SQR,
  3857. };
  3858. enum llm_ffn_gate_type {
  3859. LLM_FFN_SEQ,
  3860. LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
  3861. };
  3862. enum llm_norm_type {
  3863. LLM_NORM,
  3864. LLM_NORM_RMS,
  3865. };
  3866. static struct ggml_tensor * llm_build_inp_embd(
  3867. struct ggml_context * ctx,
  3868. const llama_hparams & hparams,
  3869. const llama_batch & batch,
  3870. struct ggml_tensor * tok_embd,
  3871. struct ggml_tensor * inp_tokens,
  3872. struct ggml_tensor * inp_embd,
  3873. const llm_build_cb & cb) {
  3874. const int64_t n_embd = hparams.n_embd;
  3875. struct ggml_tensor * inpL;
  3876. if (batch.token) {
  3877. struct ggml_tensor * inp_tokens_v = ggml_view_1d(ctx, inp_tokens, batch.n_tokens, 0);
  3878. cb(inp_tokens, "inp_tokens", -1);
  3879. inpL = ggml_get_rows(ctx, tok_embd, inp_tokens_v);
  3880. } else {
  3881. #ifdef GGML_USE_MPI
  3882. GGML_ASSERT(false && "not implemented");
  3883. #endif
  3884. inpL = ggml_view_2d(ctx, inp_embd, n_embd, batch.n_tokens, inp_embd->nb[1], 0);
  3885. }
  3886. return inpL;
  3887. }
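// llm_build_inp_embd selects the input path from the batch: when batch.token is set, the
// token ids are used as row indices into tok_embd; otherwise batch.embd is expected to hold
// the [n_embd, n_tokens] activations and is passed through as-is. A hedged usage sketch
// (pos/seq_id/logits setup omitted; the BOS id 1 is just an example):
//
//   llama_batch batch = llama_batch_init(/*n_tokens=*/512, /*embd=*/0, /*n_seq_max=*/1);
//   batch.n_tokens = 1;
//   batch.token[0] = 1;          // token-id path -> ggml_get_rows(tok_embd, ...)
//   // calling llama_batch_init with embd != 0 allocates batch.embd instead, and the
//   // ggml_view_2d branch above is taken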
  3888. // Persimmon: n_rot = n_embd_head_k/2
  3889. // Other: n_rot = n_embd_head_k
  3890. static void llm_build_k_shift(
  3891. struct ggml_context * ctx,
  3892. const llama_hparams & hparams,
  3893. const llama_cparams & cparams,
  3894. const llama_kv_cache & kv,
  3895. struct ggml_cgraph * graph,
  3896. struct ggml_tensor * K_shift,
  3897. llm_rope_type type,
  3898. int64_t n_ctx,
  3899. float freq_base,
  3900. float freq_scale,
  3901. const llm_build_cb & cb) {
  3902. const int64_t n_layer = hparams.n_layer;
  3903. const int64_t n_head_kv = hparams.n_head_kv;
  3904. const int64_t n_embd_head_k = hparams.n_embd_head_k;
  3905. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  3906. const int32_t n_rot = hparams.n_rot;
  3907. const int32_t n_orig_ctx = cparams.n_yarn_orig_ctx;
  3908. const float ext_factor = cparams.yarn_ext_factor;
  3909. const float attn_factor = cparams.yarn_attn_factor;
  3910. const float beta_fast = cparams.yarn_beta_fast;
  3911. const float beta_slow = cparams.yarn_beta_slow;
  3912. int rope_type = 0;
  3913. switch (type) {
  3914. case LLM_ROPE: rope_type = 0; break;
  3915. case LLM_ROPE_NEOX: rope_type = 2; break;
  3916. case LLM_ROPE_GLM: rope_type = 4; break;
  3917. }
  3918. for (int il = 0; il < n_layer; ++il) {
  3919. struct ggml_tensor * tmp =
  3920. // we rotate only the first n_rot dimensions
  3921. ggml_rope_custom_inplace(ctx,
  3922. ggml_view_3d(ctx, kv.k_l[il],
  3923. n_embd_head_k, n_head_kv, n_ctx,
  3924. ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
  3925. ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
  3926. 0),
  3927. K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
  3928. ext_factor, attn_factor, beta_fast, beta_slow);
  3929. cb(tmp, "K_shifted", il);
  3930. ggml_build_forward_expand(graph, tmp);
  3931. }
  3932. }
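// llm_build_k_shift re-rotates the cached keys in place after the KV cache has been shifted
// (e.g. via llama_kv_cache_seq_shift). Since RoPE is a per-position rotation and rotations
// compose additively, moving a cell from position p to p + d only requires rotating the
// already-RoPE-ed key by the delta d stored in K_shift:
//
//   rope(k, p + d) == rope(rope(k, p), d)
//
// so one extra ggml_rope_custom_inplace per layer is enough, instead of recomputing K.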
  3933. static void llm_build_kv_store(
  3934. struct ggml_context * ctx,
  3935. const llama_hparams & hparams,
  3936. const llama_kv_cache & kv,
  3937. struct ggml_cgraph * graph,
  3938. struct ggml_tensor * k_cur,
  3939. struct ggml_tensor * v_cur,
  3940. int64_t n_ctx,
  3941. int32_t n_tokens,
  3942. int32_t kv_head,
  3943. const llm_build_cb & cb,
  3944. int64_t il) {
  3945. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  3946. const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  3947. // compute the transposed [n_tokens, n_embd] V matrix
  3948. struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, n_embd_v_gqa, n_tokens));
  3949. //struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed
  3950. cb(v_cur_t, "v_cur_t", il);
  3951. struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa,
  3952. (ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa))*kv_head);
  3953. cb(k_cache_view, "k_cache_view", il);
  3954. struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa,
  3955. ( n_ctx)*ggml_element_size(kv.v_l[il]),
  3956. (kv_head)*ggml_element_size(kv.v_l[il]));
  3957. cb(v_cache_view, "v_cache_view", il);
  3958. // important: storing RoPE-ed version of K in the KV cache!
  3959. ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
  3960. ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view));
  3961. }
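// Cache layout: K is appended token-major (one n_embd_k_gqa row per token, starting at cell
// kv_head), while V is stored transposed so that each embedding dimension is contiguous
// across the n_ctx cache cells - that is why the V view uses n_ctx as its row stride. In
// element indices (assuming a non-quantized cache), the two copies above amount to:
//
//   k_l[il][ (kv_head + t)*n_embd_k_gqa + i ] = k_cur[t][i]
//   v_l[il][ i*n_ctx + (kv_head + t) ]        = v_cur[t][i]
//
// The transposed V layout lets llm_build_kqv multiply V against the attention weights
// without an extra transpose at decode time.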
  3962. static struct ggml_tensor * llm_build_norm(
  3963. struct ggml_context * ctx,
  3964. struct ggml_tensor * cur,
  3965. const llama_hparams & hparams,
  3966. struct ggml_tensor * mw,
  3967. struct ggml_tensor * mb,
  3968. llm_norm_type type,
  3969. const llm_build_cb & cb,
  3970. int il) {
  3971. switch (type) {
  3972. case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break;
  3973. case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
  3974. }
  3975. if (mw || mb) {
  3976. cb(cur, "norm", il);
  3977. }
  3978. if (mw) {
  3979. cur = ggml_mul(ctx, cur, mw);
  3980. if (mb) {
  3981. cb(cur, "norm_w", il);
  3982. }
  3983. }
  3984. if (mb) {
  3985. cur = ggml_add(ctx, cur, mb);
  3986. }
  3987. return cur;
  3988. }
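// The two norm types, written out (eps is f_norm_eps resp. f_norm_rms_eps; mw/mb may be
// NULL, in which case the corresponding scale/shift is skipped):
//
//   LLM_NORM     : y = (x - mean(x)) / sqrt(var(x) + eps) * mw + mb   // LayerNorm (Falcon, BERT, ...)
//   LLM_NORM_RMS : y = x / sqrt(mean(x^2) + eps) * mw                 // RMSNorm  (LLaMA, ...)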
  3989. static struct ggml_tensor * llm_build_ffn(
  3990. struct ggml_context * ctx,
  3991. struct ggml_tensor * cur,
  3992. struct ggml_tensor * up,
  3993. struct ggml_tensor * up_b,
  3994. struct ggml_tensor * gate,
  3995. struct ggml_tensor * gate_b,
  3996. struct ggml_tensor * down,
  3997. struct ggml_tensor * down_b,
  3998. struct ggml_tensor * act_scales,
  3999. llm_ffn_op_type type_op,
  4000. llm_ffn_gate_type type_gate,
  4001. const llm_build_cb & cb,
  4002. int il) {
  4003. struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur);
  4004. cb(tmp, "ffn_up", il);
  4005. if (up_b) {
  4006. tmp = ggml_add(ctx, tmp, up_b);
  4007. cb(tmp, "ffn_up_b", il);
  4008. }
  4009. if (gate) {
  4010. switch (type_gate) {
  4011. case LLM_FFN_SEQ:
  4012. {
  4013. cur = ggml_mul_mat(ctx, gate, tmp);
  4014. cb(cur, "ffn_gate", il);
  4015. } break;
  4016. case LLM_FFN_PAR:
  4017. {
  4018. cur = ggml_mul_mat(ctx, gate, cur);
  4019. cb(cur, "ffn_gate", il);
  4020. } break;
  4021. }
  4022. if (gate_b) {
  4023. cur = ggml_add(ctx, cur, gate_b);
  4024. cb(cur, "ffn_gate_b", il);
  4025. }
  4026. } else {
  4027. cur = tmp;
  4028. }
  4029. switch (type_op) {
  4030. case LLM_FFN_SILU:
  4031. {
  4032. cur = ggml_silu(ctx, cur);
  4033. cb(cur, "ffn_silu", il);
  4034. } break;
  4035. case LLM_FFN_GELU:
  4036. {
  4037. cur = ggml_gelu(ctx, cur);
  4038. cb(cur, "ffn_gelu", il);
  4039. if (act_scales != NULL) {
  4040. cur = ggml_div(ctx, cur, act_scales);
  4041. cb(cur, "ffn_act", il);
  4042. }
  4043. } break;
  4044. case LLM_FFN_RELU:
  4045. {
  4046. cur = ggml_relu(ctx, cur);
  4047. cb(cur, "ffn_relu", il);
  4048. } break;
  4049. case LLM_FFN_RELU_SQR:
  4050. {
  4051. cur = ggml_relu(ctx, cur);
  4052. cb(cur, "ffn_relu", il);
  4053. cur = ggml_sqr(ctx, cur);
  4054. cb(cur, "ffn_sqr(relu)", il);
  4055. } break;
  4056. }
  4057. if (type_gate == LLM_FFN_PAR) {
  4058. cur = ggml_mul(ctx, cur, tmp);
  4059. cb(cur, "ffn_gate_par", il);
  4060. }
  4061. cur = ggml_mul_mat(ctx, down, cur);
  4062. if (down_b) {
  4063. cb(cur, "ffn_down", il);
  4064. }
  4065. if (down_b) {
  4066. cur = ggml_add(ctx, cur, down_b);
  4067. }
  4068. return cur;
  4069. }
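// Worked examples of how the call sites below combine these flags (biases and act_scales
// omitted; "(.)" denotes the elementwise product):
//
//   LLM_FFN_SILU + LLM_FFN_PAR  (LLaMA-style SwiGLU):
//       ffn(x) = W_down * ( silu(W_gate * x) (.) (W_up * x) )
//
//   LLM_FFN_GELU + LLM_FFN_SEQ  (Falcon/StarCoder/BLOOM MLP, gate == NULL):
//       ffn(x) = W_down * gelu(W_up * x)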
  4070. // if max_alibi_bias > 0 then apply ALiBi
  4071. static struct ggml_tensor * llm_build_kqv(
  4072. struct ggml_context * ctx,
  4073. const llama_model & model,
  4074. const llama_hparams & hparams,
  4075. const llama_kv_cache & kv,
  4076. struct ggml_cgraph * graph,
  4077. struct ggml_tensor * wo,
  4078. struct ggml_tensor * wo_b,
  4079. struct ggml_tensor * q_cur,
  4080. struct ggml_tensor * kq_mask,
  4081. int64_t n_ctx,
  4082. int32_t n_tokens,
  4083. int32_t n_kv,
  4084. float max_alibi_bias,
  4085. float kq_scale,
  4086. const llm_build_cb & cb,
  4087. int il) {
  4088. const int64_t n_head = hparams.n_head;
  4089. const int64_t n_head_kv = hparams.n_head_kv;
  4090. const int64_t n_embd_head_k = hparams.n_embd_head_k;
  4091. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  4092. const int64_t n_embd_head_v = hparams.n_embd_head_v;
  4093. struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
  4094. cb(q, "q", il);
  4095. struct ggml_tensor * k =
  4096. ggml_view_3d(ctx, kv.k_l[il],
  4097. n_embd_head_k, n_kv, n_head_kv,
  4098. ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
  4099. ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
  4100. 0);
  4101. cb(k, "k", il);
  4102. struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
  4103. cb(kq, "kq", il);
  4104. if (model.arch == LLM_ARCH_PHI2) {
  4105. // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
  4106. // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
  4107. ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
  4108. }
  4109. if (max_alibi_bias > 0.0f) {
  4110. // temporary branch until we figure out how to handle ggml_alibi through ggml_add
  4111. kq = ggml_scale(ctx, kq, kq_scale);
  4112. cb(kq, "kq_scaled", il);
  4113. if (max_alibi_bias > 0.0f) {
  4114. // TODO: n_head or n_head_kv
  4115. // TODO: K-shift is likely not working
  4116. // TODO: change to ggml_add
  4117. kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, max_alibi_bias);
  4118. cb(kq, "kq_scaled_alibi", il);
  4119. }
  4120. kq = ggml_add(ctx, kq, kq_mask);
  4121. cb(kq, "kq_masked", il);
  4122. kq = ggml_soft_max(ctx, kq);
  4123. cb(kq, "kq_soft_max", il);
  4124. } else {
  4125. kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale);
  4126. cb(kq, "kq_soft_max_ext", il);
  4127. }
  4128. // split cached v into n_head heads
  4129. struct ggml_tensor * v =
  4130. ggml_view_3d(ctx, kv.v_l[il],
  4131. n_kv, n_embd_head_v, n_head_kv,
  4132. ggml_element_size(kv.v_l[il])*n_ctx,
  4133. ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v,
  4134. 0);
  4135. cb(v, "v", il);
  4136. struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
  4137. cb(kqv, "kqv", il);
  4138. struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
  4139. cb(kqv_merged, "kqv_merged", il);
  4140. struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_k*n_head, n_tokens);
  4141. cb(cur, "kqv_merged_cont", il);
  4142. ggml_build_forward_expand(graph, cur);
  4143. cur = ggml_mul_mat(ctx, wo, cur);
  4144. if (wo_b) {
  4145. cb(cur, "kqv_wo", il);
  4146. }
  4147. if (wo_b) {
  4148. cur = ggml_add(ctx, cur, wo_b);
  4149. }
  4150. return cur;
  4151. }
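// llm_build_kqv is standard scaled-dot-product attention over the cached keys/values; per
// head, the graph above computes
//
//   kq[j,t]  = dot(k_j, q_t)                        // j over the n_kv cache cells
//   w        = softmax(kq * kq_scale + KQ_mask)     // ALiBi bias added first when enabled
//   kqv[:,t] = sum_j w[j,t] * v_j
//   out      = wo * concat_heads(kqv) (+ wo_b)
//
// Every call site below passes kq_scale = 1/sqrt(n_embd_head).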
  4152. static struct ggml_tensor * llm_build_kv(
  4153. struct ggml_context * ctx,
  4154. const llama_model & model,
  4155. const llama_hparams & hparams,
  4156. const llama_kv_cache & kv,
  4157. struct ggml_cgraph * graph,
  4158. struct ggml_tensor * wo,
  4159. struct ggml_tensor * wo_b,
  4160. struct ggml_tensor * k_cur,
  4161. struct ggml_tensor * v_cur,
  4162. struct ggml_tensor * q_cur,
  4163. struct ggml_tensor * kq_mask,
  4164. int64_t n_ctx,
  4165. int32_t n_tokens,
  4166. int32_t kv_head,
  4167. int32_t n_kv,
  4168. float max_alibi_bias,
  4169. float kq_scale,
  4170. const llm_build_cb & cb,
  4171. int il) {
  4172. // these nodes are added to the graph together so that they are not reordered
  4173. // by doing so, the number of splits in the graph is reduced
  4174. ggml_build_forward_expand(graph, q_cur);
  4175. ggml_build_forward_expand(graph, k_cur);
  4176. ggml_build_forward_expand(graph, v_cur);
  4177. llm_build_kv_store(ctx, hparams, kv, graph, k_cur, v_cur, n_ctx, n_tokens, kv_head, cb, il);
  4178. struct ggml_tensor * cur;
  4179. cur = llm_build_kqv(ctx, model, hparams, kv, graph,
  4180. wo, wo_b,
  4181. q_cur, kq_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, kq_scale, cb, il);
  4182. cb(cur, "kqv_out", il);
  4183. return cur;
  4184. }
  4185. struct llm_build_context {
  4186. const llama_model & model;
  4187. const llama_context & lctx;
  4188. const llama_hparams & hparams;
  4189. const llama_cparams & cparams;
  4190. const llama_batch & batch;
  4191. const llama_kv_cache & kv_self;
  4192. const int64_t n_embd;
  4193. const int64_t n_layer;
  4194. const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
  4195. const int64_t n_head;
  4196. const int64_t n_head_kv;
  4197. const int64_t n_embd_head_k;
  4198. const int64_t n_embd_k_gqa;
  4199. const int64_t n_embd_head_v;
  4200. const int64_t n_embd_v_gqa;
  4201. const int64_t n_expert;
  4202. const int64_t n_expert_used;
  4203. const float freq_base;
  4204. const float freq_scale;
  4205. const float ext_factor;
  4206. const float attn_factor;
  4207. const float beta_fast;
  4208. const float beta_slow;
  4209. const float norm_eps;
  4210. const float norm_rms_eps;
  4211. const int32_t n_tokens;
  4212. const int32_t n_kv; // size of KV cache to consider (n_kv <= n_ctx)
  4213. const int32_t kv_head; // index of where we store new KV data in the cache
  4214. const int32_t n_orig_ctx;
  4215. const bool do_rope_shift;
  4216. const bool do_pooling;
  4217. const llm_build_cb & cb;
  4218. std::vector<uint8_t> & buf_compute_meta;
  4219. struct ggml_context * ctx0 = nullptr;
  4220. // TODO: consider making the entire interface noexcept
  4221. llm_build_context(
  4222. llama_context & lctx,
  4223. const llama_batch & batch,
  4224. const llm_build_cb & cb,
  4225. bool worst_case) :
  4226. model (lctx.model),
  4227. lctx (lctx),
  4228. hparams (model.hparams),
  4229. cparams (lctx.cparams),
  4230. batch (batch),
  4231. kv_self (lctx.kv_self),
  4232. n_embd (hparams.n_embd),
  4233. n_layer (hparams.n_layer),
  4234. n_ctx (cparams.n_ctx),
  4235. n_head (hparams.n_head),
  4236. n_head_kv (hparams.n_head_kv),
  4237. n_embd_head_k (hparams.n_embd_head_k),
  4238. n_embd_k_gqa (hparams.n_embd_k_gqa()),
  4239. n_embd_head_v (hparams.n_embd_head_v),
  4240. n_embd_v_gqa (hparams.n_embd_v_gqa()),
  4241. n_expert (hparams.n_expert),
  4242. n_expert_used (hparams.n_expert_used),
  4243. freq_base (cparams.rope_freq_base),
  4244. freq_scale (cparams.rope_freq_scale),
  4245. ext_factor (cparams.yarn_ext_factor),
  4246. attn_factor (cparams.yarn_attn_factor),
  4247. beta_fast (cparams.yarn_beta_fast),
  4248. beta_slow (cparams.yarn_beta_slow),
  4249. norm_eps (hparams.f_norm_eps),
  4250. norm_rms_eps (hparams.f_norm_rms_eps),
  4251. n_tokens (batch.n_tokens),
  4252. n_kv (worst_case ? n_ctx : kv_self.n),
  4253. kv_head (worst_case ? n_ctx - n_tokens : kv_self.head),
  4254. n_orig_ctx (cparams.n_yarn_orig_ctx),
  4255. do_rope_shift (worst_case || kv_self.has_shift),
  4256. do_pooling (hparams.pooling_layer && cparams.do_pooling),
  4257. cb (cb),
  4258. buf_compute_meta (lctx.buf_compute_meta) {
  4259. // all initializations should be done in init()
  4260. }
  4261. void init() {
  4262. struct ggml_init_params params = {
  4263. /*.mem_size =*/ buf_compute_meta.size(),
  4264. /*.mem_buffer =*/ buf_compute_meta.data(),
  4265. /*.no_alloc =*/ true,
  4266. };
  4267. ctx0 = ggml_init(params);
  4268. }
  4269. void free() {
  4270. if (ctx0) {
  4271. ggml_free(ctx0);
  4272. ctx0 = nullptr;
  4273. }
  4274. }
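// Typical lifecycle, as driven by llama_build_graph further down in this file (sketch; the
// exact dispatch differs only in the number of supported architectures):
//
//   llm_build_context llm(lctx, batch, cb, worst_case);
//   llm.init();                                  // no-alloc ggml context over buf_compute_meta
//   struct ggml_cgraph * gf = nullptr;
//   switch (model.arch) {
//       case LLM_ARCH_LLAMA:  gf = llm.build_llama();  break;
//       case LLM_ARCH_FALCON: gf = llm.build_falcon(); break;
//       // ...
//       default: GGML_ASSERT(false);
//   }
//   llm.free();                                  // the graph itself lives in buf_compute_meta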
  4275. struct ggml_cgraph * build_llama() {
  4276. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4277. const int64_t n_embd_head = hparams.n_embd_head_v;
  4278. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4279. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4280. struct ggml_tensor * cur;
  4281. struct ggml_tensor * inpL;
  4282. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  4283. cb(inpL, "inp_embd", -1);
  4284. // inp_pos - contains the positions
  4285. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  4286. cb(inp_pos, "inp_pos", -1);
  4287. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4288. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  4289. cb(KQ_mask, "KQ_mask", -1);
  4290. // shift the entire K-cache if needed
  4291. if (do_rope_shift) {
  4292. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
  4293. }
  4294. for (int il = 0; il < n_layer; ++il) {
  4295. struct ggml_tensor * inpSA = inpL;
  4296. // norm
  4297. cur = llm_build_norm(ctx0, inpL, hparams,
  4298. model.layers[il].attn_norm, NULL,
  4299. LLM_NORM_RMS, cb, il);
  4300. cb(cur, "attn_norm", il);
  4301. // self-attention
  4302. {
  4303. // compute Q and K and RoPE them
  4304. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4305. cb(Qcur, "Qcur", il);
  4306. if (model.layers[il].bq) {
  4307. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  4308. cb(Qcur, "Qcur", il);
  4309. }
  4310. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4311. cb(Kcur, "Kcur", il);
  4312. if (model.layers[il].bk) {
  4313. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  4314. cb(Kcur, "Kcur", il);
  4315. }
  4316. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4317. cb(Vcur, "Vcur", il);
  4318. if (model.layers[il].bv) {
  4319. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  4320. cb(Vcur, "Vcur", il);
  4321. }
  4322. Qcur = ggml_rope_custom(
  4323. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  4324. hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
  4325. ext_factor, attn_factor, beta_fast, beta_slow
  4326. );
  4327. cb(Qcur, "Qcur", il);
  4328. Kcur = ggml_rope_custom(
  4329. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  4330. hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
  4331. ext_factor, attn_factor, beta_fast, beta_slow
  4332. );
  4333. cb(Kcur, "Kcur", il);
  4334. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  4335. model.layers[il].wo, model.layers[il].bo,
  4336. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4337. cb(cur, "kqv_out", il);
  4338. }
  4339. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4340. cb(ffn_inp, "ffn_inp", il);
  4341. // feed-forward network
  4342. if (model.layers[il].ffn_gate_inp == nullptr) {
  4343. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4344. model.layers[il].ffn_norm, NULL,
  4345. LLM_NORM_RMS, cb, il);
  4346. cb(cur, "ffn_norm", il);
  4347. cur = llm_build_ffn(ctx0, cur,
  4348. model.layers[il].ffn_up, NULL,
  4349. model.layers[il].ffn_gate, NULL,
  4350. model.layers[il].ffn_down, NULL,
  4351. NULL,
  4352. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4353. cb(cur, "ffn_out", il);
  4354. } else {
  4355. // MoE branch
  4356. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4357. model.layers[il].ffn_norm, NULL,
  4358. LLM_NORM_RMS, cb, il);
  4359. cb(cur, "ffn_norm", il);
  4360. ggml_tensor * logits = ggml_mul_mat(ctx0, model.layers[il].ffn_gate_inp, cur); // [n_tokens, num_experts]
  4361. cb(logits, "ffn_moe_logits", il);
  4362. ggml_tensor * probs = ggml_soft_max(ctx0, logits); // [n_tokens, num_experts]
  4363. cb(probs, "ffn_moe_probs", il);
  4364. // select experts
  4365. ggml_tensor * selected_experts = ggml_top_k(ctx0, probs, n_expert_used); // [n_tokens, num_experts_per_tok]
  4366. cb(selected_experts->src[0], "ffn_moe_argsort", il);
  4367. ggml_tensor * weights = ggml_get_rows(ctx0,
  4368. ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts);
  4369. cb(weights, "ffn_moe_weights", il);
  4370. weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); // [n_tokens, num_experts_per_tok]
  4371. ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights);
  4372. cb(weights_sum, "ffn_moe_weights_sum", il);
  4373. weights = ggml_div(ctx0, weights, weights_sum); // [n_tokens, num_experts_per_tok]
  4374. cb(weights, "ffn_moe_weights_norm", il);
  4375. // compute expert outputs
  4376. ggml_tensor * moe_out = nullptr;
  4377. for (int i = 0; i < n_expert_used; ++i) {
  4378. ggml_tensor * cur_expert;
  4379. ggml_tensor * cur_up = ggml_mul_mat_id(ctx0, model.layers[il].ffn_up_exp, n_expert, selected_experts, i, cur);
  4380. cb(cur_up, "ffn_moe_up", il);
  4381. ggml_tensor * cur_gate = ggml_mul_mat_id(ctx0, model.layers[il].ffn_gate_exp, n_expert, selected_experts, i, cur);
  4382. cb(cur_gate, "ffn_moe_gate", il);
  4383. cur_gate = ggml_silu(ctx0, cur_gate);
  4384. cb(cur_gate, "ffn_moe_silu", il);
  4385. cur_expert = ggml_mul(ctx0, cur_up, cur_gate); // [n_tokens, n_embd]
  4386. cb(cur_expert, "ffn_moe_gate_par", il);
  4387. cur_expert = ggml_mul_mat_id(ctx0, model.layers[il].ffn_down_exp, n_expert, selected_experts, i, cur_expert); // [n_tokens, n_embd]
  4388. cb(cur_expert, "ffn_moe_down", il);
  4389. cur_expert = ggml_mul(ctx0, cur_expert,
  4390. ggml_view_2d(ctx0, weights, 1, n_tokens, weights->nb[1], i*weights->nb[0]));
  4391. cb(cur_expert, "ffn_moe_weighted", il);
  4392. if (i == 0) {
  4393. moe_out = cur_expert;
  4394. } else {
  4395. moe_out = ggml_add(ctx0, moe_out, cur_expert);
  4396. cb(moe_out, "ffn_moe_out", il);
  4397. }
  4398. }
  4399. cur = moe_out;
  4400. }
  4401. cur = ggml_add(ctx0, cur, ffn_inp);
  4402. cb(cur, "l_out", il);
  4403. // input for next layer
  4404. inpL = cur;
  4405. }
  4406. cur = inpL;
  4407. cur = llm_build_norm(ctx0, cur, hparams,
  4408. model.output_norm, NULL,
  4409. LLM_NORM_RMS, cb, -1);
  4410. cb(cur, "result_norm", -1);
  4411. // lm_head
  4412. cur = ggml_mul_mat(ctx0, model.output, cur);
  4413. cb(cur, "result_output", -1);
  4414. ggml_build_forward_expand(gf, cur);
  4415. return gf;
  4416. }
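// build_llama produces the canonical pre-norm transformer block; per layer the graph is
// (residuals written out, the MoE branch replacing the dense FFN when ffn_gate_inp is set):
//
//   h   = x + attn( rms_norm(x) )
//   out = h + ffn ( rms_norm(h) )                      // dense: SwiGLU FFN
//   out = h + sum_i w_i * expert_i( rms_norm(h) )      // MoE: top-k experts, normalized softmax weights w_i
//
// followed by a final rms_norm and the lm_head matmul ("result_output").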
  4417. struct ggml_cgraph * build_baichuan() {
  4418. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4419. const int64_t n_embd_head = hparams.n_embd_head_v;
  4420. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4421. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4422. struct ggml_tensor * cur;
  4423. struct ggml_tensor * inpL;
  4424. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  4425. cb(inpL, "inp_embd", -1);
  4426. // inp_pos - contains the positions
  4427. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  4428. cb(inp_pos, "inp_pos", -1);
  4429. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4430. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  4431. cb(KQ_mask, "KQ_mask", -1);
  4432. // shift the entire K-cache if needed
  4433. if (do_rope_shift) {
  4434. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
  4435. }
  4436. for (int il = 0; il < n_layer; ++il) {
  4437. struct ggml_tensor * inpSA = inpL;
  4438. cur = llm_build_norm(ctx0, inpL, hparams,
  4439. model.layers[il].attn_norm, NULL,
  4440. LLM_NORM_RMS, cb, il);
  4441. cb(cur, "attn_norm", il);
  4442. // self-attention
  4443. {
  4444. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4445. cb(Qcur, "Qcur", il);
  4446. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4447. cb(Kcur, "Kcur", il);
  4448. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4449. cb(Vcur, "Vcur", il);
  4450. switch (model.type) {
  4451. case MODEL_7B:
  4452. Qcur = ggml_rope_custom(
  4453. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  4454. hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
  4455. ext_factor, attn_factor, beta_fast, beta_slow
  4456. );
  4457. Kcur = ggml_rope_custom(
  4458. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  4459. hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
  4460. ext_factor, attn_factor, beta_fast, beta_slow
  4461. );
  4462. break;
  4463. case MODEL_13B:
  4464. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
  4465. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
  4466. break;
  4467. default:
  4468. GGML_ASSERT(false);
  4469. }
  4470. cb(Qcur, "Qcur", il);
  4471. cb(Kcur, "Kcur", il);
  4472. // apply ALiBi for 13B model
  4473. const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f;
  4474. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  4475. model.layers[il].wo, NULL,
  4476. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4477. cb(cur, "kqv_out", il);
  4478. }
  4479. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4480. cb(ffn_inp, "ffn_inp", il);
  4481. // feed-forward network
  4482. {
  4483. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4484. model.layers[il].ffn_norm, NULL,
  4485. LLM_NORM_RMS, cb, il);
  4486. cb(cur, "ffn_norm", il);
  4487. cur = llm_build_ffn(ctx0, cur,
  4488. model.layers[il].ffn_up, NULL,
  4489. model.layers[il].ffn_gate, NULL,
  4490. model.layers[il].ffn_down, NULL,
  4491. NULL,
  4492. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4493. cb(cur, "ffn_out", il);
  4494. }
  4495. cur = ggml_add(ctx0, cur, ffn_inp);
  4496. cb(cur, "l_out", il);
  4497. // input for next layer
  4498. inpL = cur;
  4499. }
  4500. cur = inpL;
  4501. cur = llm_build_norm(ctx0, cur, hparams,
  4502. model.output_norm, NULL,
  4503. LLM_NORM_RMS, cb, -1);
  4504. cb(cur, "result_norm", -1);
  4505. // lm_head
  4506. cur = ggml_mul_mat(ctx0, model.output, cur);
  4507. cb(cur, "result_output", -1);
  4508. ggml_build_forward_expand(gf, cur);
  4509. return gf;
  4510. }
  4511. struct ggml_cgraph * build_falcon() {
  4512. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4513. const int64_t n_embd_head = hparams.n_embd_head_v;
  4514. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4515. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4516. GGML_ASSERT(n_embd_head == hparams.n_rot);
  4517. struct ggml_tensor * cur;
  4518. struct ggml_tensor * inpL;
  4519. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  4520. cb(inpL, "inp_embd", -1);
  4521. // inp_pos - contains the positions
  4522. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  4523. cb(inp_pos, "inp_pos", -1);
  4524. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4525. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  4526. cb(KQ_mask, "KQ_mask", -1);
  4527. // shift the entire K-cache if needed
  4528. if (do_rope_shift) {
  4529. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
  4530. }
  4531. for (int il = 0; il < n_layer; ++il) {
  4532. struct ggml_tensor * attn_norm;
  4533. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  4534. model.layers[il].attn_norm,
  4535. model.layers[il].attn_norm_b,
  4536. LLM_NORM, cb, il);
  4537. cb(attn_norm, "attn_norm", il);
  4538. // self-attention
  4539. {
  4540. if (model.layers[il].attn_norm_2) {
  4541. // Falcon-40B
  4542. cur = llm_build_norm(ctx0, inpL, hparams,
  4543. model.layers[il].attn_norm_2,
  4544. model.layers[il].attn_norm_2_b,
  4545. LLM_NORM, cb, il);
  4546. cb(cur, "attn_norm_2", il);
  4547. } else {
  4548. cur = attn_norm;
  4549. }
  4550. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4551. cb(cur, "wqkv", il);
  4552. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4553. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4554. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4555. cb(Qcur, "Qcur", il);
  4556. cb(Kcur, "Kcur", il);
  4557. cb(Vcur, "Vcur", il);
  4558. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4559. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4560. // using mode = 2 for neox mode
  4561. Qcur = ggml_rope_custom(
  4562. ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4563. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4564. );
  4565. cb(Qcur, "Qcur", il);
  4566. Kcur = ggml_rope_custom(
  4567. ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4568. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4569. );
  4570. cb(Kcur, "Kcur", il);
  4571. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  4572. model.layers[il].wo, NULL,
  4573. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4574. cb(cur, "kqv_out", il);
  4575. }
  4576. struct ggml_tensor * ffn_inp = cur;
  4577. // feed forward
  4578. {
  4579. cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result
  4580. model.layers[il].ffn_up, NULL,
  4581. NULL, NULL,
  4582. model.layers[il].ffn_down, NULL,
  4583. NULL,
  4584. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4585. cb(cur, "ffn_out", il);
  4586. }
  4587. cur = ggml_add(ctx0, cur, ffn_inp);
  4588. cb(cur, "l_out", il);
  4589. cur = ggml_add(ctx0, cur, inpL);
  4590. cb(cur, "l_out", il);
  4591. // input for next layer
  4592. inpL = cur;
  4593. }
  4594. cur = inpL;
  4595. // norm
  4596. cur = llm_build_norm(ctx0, cur, hparams,
  4597. model.output_norm,
  4598. model.output_norm_b,
  4599. LLM_NORM, cb, -1);
  4600. cb(cur, "result_norm", -1);
  4601. cur = ggml_mul_mat(ctx0, model.output, cur);
  4602. cb(cur, "result_output", -1);
  4603. ggml_build_forward_expand(gf, cur);
  4604. return gf;
  4605. }
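// Falcon uses the "parallel attention + FFN" layout: the attention block and the FFN both
// read the same attn_norm output, and both results are added onto the raw layer input:
//
//   out = x + attn( ln(x) ) + ffn( ln(x) )
//
// which is why llm_build_ffn above is fed attn_norm rather than the attention output.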
  4606. struct ggml_cgraph * build_starcoder() {
  4607. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4608. const int64_t n_embd_head = hparams.n_embd_head_v;
  4609. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4610. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4611. struct ggml_tensor * cur;
  4612. struct ggml_tensor * pos;
  4613. struct ggml_tensor * inpL;
  4614. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  4615. cb(inpL, "inp_embd", -1);
  4616. // inp_pos - contains the positions
  4617. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  4618. cb(inp_pos, "inp_pos", -1);
  4619. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4620. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  4621. cb(KQ_mask, "KQ_mask", -1);
  4622. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  4623. cb(pos, "pos_embd", -1);
  4624. inpL = ggml_add(ctx0, inpL, pos);
  4625. cb(inpL, "inpL", -1);
  4626. for (int il = 0; il < n_layer; ++il) {
  4627. cur = llm_build_norm(ctx0, inpL, hparams,
  4628. model.layers[il].attn_norm,
  4629. model.layers[il].attn_norm_b,
  4630. LLM_NORM, cb, il);
  4631. cb(cur, "attn_norm", il);
  4632. // self-attention
  4633. {
  4634. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4635. cb(cur, "wqkv", il);
  4636. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4637. cb(cur, "bqkv", il);
  4638. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4639. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4640. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4641. cb(Qcur, "Qcur", il);
  4642. cb(Kcur, "Kcur", il);
  4643. cb(Vcur, "Vcur", il);
  4644. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4645. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  4646. model.layers[il].wo, model.layers[il].bo,
  4647. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4648. cb(cur, "kqv_out", il);
  4649. }
  4650. // add the input
  4651. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4652. cb(ffn_inp, "ffn_inp", il);
  4653. // FF
  4654. {
  4655. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4656. model.layers[il].ffn_norm,
  4657. model.layers[il].ffn_norm_b,
  4658. LLM_NORM, cb, il);
  4659. cb(cur, "ffn_norm", il);
  4660. cur = llm_build_ffn(ctx0, cur,
  4661. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4662. NULL, NULL,
  4663. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4664. NULL,
  4665. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4666. cb(cur, "ffn_out", il);
  4667. }
  4668. inpL = ggml_add(ctx0, cur, ffn_inp);
  4669. cb(inpL, "l_out", il);
  4670. }
  4671. cur = llm_build_norm(ctx0, inpL, hparams,
  4672. model.output_norm,
  4673. model.output_norm_b,
  4674. LLM_NORM, cb, -1);
  4675. cb(cur, "result_norm", -1);
  4676. cur = ggml_mul_mat(ctx0, model.output, cur);
  4677. cb(cur, "result_output", -1);
  4678. ggml_build_forward_expand(gf, cur);
  4679. return gf;
  4680. }
  4681. struct ggml_cgraph * build_persimmon() {
  4682. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4683. const int64_t n_embd_head = hparams.n_embd_head_v;
  4684. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4685. GGML_ASSERT(n_embd_head/2 == hparams.n_rot);
  4686. struct ggml_tensor * cur;
  4687. struct ggml_tensor * inpL;
  4688. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  4689. cb(inpL, "inp_embd", -1);
  4690. // inp_pos - contains the positions
  4691. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  4692. cb(inp_pos, "inp_pos", -1);
  4693. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4694. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  4695. cb(KQ_mask, "KQ_mask", -1);
  4696. if (do_rope_shift) {
  4697. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
  4698. }
  4699. for (int il = 0; il < n_layer; ++il) {
  4700. struct ggml_tensor * residual = inpL;
  4701. cur = llm_build_norm(ctx0, inpL, hparams,
  4702. model.layers[il].attn_norm,
  4703. model.layers[il].attn_norm_b,
  4704. LLM_NORM, cb, il);
  4705. cb(cur, "attn_norm", il);
  4706. // self attention
  4707. {
  4708. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4709. cb(cur, "wqkv", il);
  4710. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4711. cb(cur, "bqkv", il);
  4712. // split qkv
  4713. GGML_ASSERT(n_head_kv == n_head);
  4714. struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens);
  4715. cb(tmpqkv, "tmpqkv", il);
  4716. struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2));
  4717. cb(tmpqkv_perm, "tmpqkv", il);
  4718. struct ggml_tensor * tmpq = ggml_view_3d(
  4719. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4720. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4721. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4722. 0
  4723. );
  4724. cb(tmpq, "tmpq", il);
  4725. struct ggml_tensor * tmpk = ggml_view_3d(
  4726. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4727. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4728. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4729. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens
  4730. );
  4731. cb(tmpk, "tmpk", il);
  4732. // Q/K Layernorm
  4733. tmpq = llm_build_norm(ctx0, tmpq, hparams,
  4734. model.layers[il].attn_q_norm,
  4735. model.layers[il].attn_q_norm_b,
  4736. LLM_NORM, cb, il);
  4737. cb(tmpq, "tmpq", il);
  4738. tmpk = llm_build_norm(ctx0, tmpk, hparams,
  4739. model.layers[il].attn_k_norm,
  4740. model.layers[il].attn_k_norm_b,
  4741. LLM_NORM, cb, il);
  4742. cb(tmpk, "tmpk", il);
  4743. // RoPE the first n_rot of q/k, pass the other half, and concat.
  4744. struct ggml_tensor * qrot = ggml_view_3d(
  4745. ctx0, tmpq, hparams.n_rot, n_head, n_tokens,
  4746. ggml_element_size(tmpq) * n_embd_head,
  4747. ggml_element_size(tmpq) * n_embd_head * n_head,
  4748. 0
  4749. );
  4750. cb(qrot, "qrot", il);
  4751. struct ggml_tensor * krot = ggml_view_3d(
  4752. ctx0, tmpk, hparams.n_rot, n_head, n_tokens,
  4753. ggml_element_size(tmpk) * n_embd_head,
  4754. ggml_element_size(tmpk) * n_embd_head * n_head,
  4755. 0
  4756. );
  4757. cb(krot, "krot", il);
4758. // get the second half of tmpq, e.g. tmpq[n_rot:, :, :]
  4759. struct ggml_tensor * qpass = ggml_view_3d(
  4760. ctx0, tmpq, hparams.n_rot, n_head, n_tokens,
  4761. ggml_element_size(tmpq) * n_embd_head,
  4762. ggml_element_size(tmpq) * n_embd_head * n_head,
  4763. ggml_element_size(tmpq) * hparams.n_rot
  4764. );
  4765. cb(qpass, "qpass", il);
  4766. struct ggml_tensor * kpass = ggml_view_3d(
  4767. ctx0, tmpk, hparams.n_rot, n_head, n_tokens,
  4768. ggml_element_size(tmpk) * n_embd_head,
  4769. ggml_element_size(tmpk) * n_embd_head * n_head,
  4770. ggml_element_size(tmpk) * hparams.n_rot
  4771. );
  4772. cb(kpass, "kpass", il);
  4773. struct ggml_tensor * qrotated = ggml_rope_custom(
  4774. ctx0, qrot, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4775. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4776. );
  4777. cb(qrotated, "qrotated", il);
  4778. struct ggml_tensor * krotated = ggml_rope_custom(
  4779. ctx0, krot, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4780. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4781. );
  4782. cb(krotated, "krotated", il);
  4783. // ggml currently only supports concatenation on dim=2
  4784. // so we need to permute qrot, qpass, concat, then permute back.
  4785. qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3));
  4786. cb(qrotated, "qrotated", il);
  4787. krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3));
  4788. cb(krotated, "krotated", il);
  4789. qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3));
  4790. cb(qpass, "qpass", il);
  4791. kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3));
  4792. cb(kpass, "kpass", il);
  4793. struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass);
  4794. cb(Qcur, "Qcur", il);
  4795. struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
  4796. cb(Kcur, "Kcur", il);
  4797. struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3));
  4798. cb(Q, "Q", il);
  4799. Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
  4800. cb(Kcur, "Kcur", il);
  4801. struct ggml_tensor * Vcur = ggml_view_3d(
  4802. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4803. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4804. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4805. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2
  4806. );
  4807. cb(Vcur, "Vcur", il);
  4808. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  4809. model.layers[il].wo, model.layers[il].bo,
  4810. Kcur, Vcur, Q, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4811. cb(cur, "kqv_out", il);
  4812. }
  4813. struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
  4814. cb(ffn_inp, "ffn_inp", il);
  4815. // feed-forward network
  4816. {
  4817. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4818. model.layers[il].ffn_norm,
  4819. model.layers[il].ffn_norm_b,
  4820. LLM_NORM, cb, il);
  4821. cb(cur, "ffn_norm", il);
  4822. cur = llm_build_ffn(ctx0, cur,
  4823. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4824. NULL, NULL,
  4825. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4826. NULL,
  4827. LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);
  4828. cb(cur, "ffn_out", il);
  4829. }
  4830. cur = ggml_add(ctx0, cur, ffn_inp);
  4831. cb(cur, "l_out", il);
  4832. inpL = cur;
  4833. }
  4834. cur = inpL;
  4835. cur = llm_build_norm(ctx0, cur, hparams,
  4836. model.output_norm,
  4837. model.output_norm_b,
  4838. LLM_NORM, cb, -1);
  4839. cb(cur, "result_norm", -1);
  4840. cur = ggml_mul_mat(ctx0, model.output, cur);
  4841. cb(cur, "result_output", -1);
  4842. ggml_build_forward_expand(gf, cur);
  4843. return gf;
  4844. }
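// Persimmon applies partial RoPE: only the first n_rot = n_embd_head/2 dimensions of each
// head are rotated (qrot/krot), the remainder is passed through unchanged (qpass/kpass),
// and the two halves are concatenated back into the full head:
//
//   q' = concat( rope(q[:n_rot]), q[n_rot:] )
//
// The ggml_permute round-trips above exist only because ggml_concat currently concatenates
// along dim 2.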
  4845. struct ggml_cgraph * build_refact() {
  4846. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4847. const int64_t n_embd_head = hparams.n_embd_head_v;
  4848. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4849. struct ggml_tensor * cur;
  4850. struct ggml_tensor * inpL;
  4851. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  4852. cb(inpL, "inp_embd", -1);
  4853. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4854. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  4855. cb(KQ_mask, "KQ_mask", -1);
  4856. for (int il = 0; il < n_layer; ++il) {
  4857. struct ggml_tensor * inpSA = inpL;
  4858. cur = llm_build_norm(ctx0, inpL, hparams,
  4859. model.layers[il].attn_norm, NULL,
  4860. LLM_NORM_RMS, cb, il);
  4861. cb(cur, "attn_norm", il);
  4862. // self-attention
  4863. {
  4864. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4865. cb(Qcur, "Qcur", il);
  4866. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4867. cb(Kcur, "Kcur", il);
  4868. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4869. cb(Vcur, "Vcur", il);
  4870. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4871. cb(Kcur, "Kcur", il);
  4872. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4873. cb(Qcur, "Qcur", il);
  4874. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  4875. model.layers[il].wo, NULL,
  4876. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4877. cb(cur, "kqv_out", il);
  4878. }
  4879. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4880. cb(ffn_inp, "ffn_inp", il);
  4881. // feed-forward network
  4882. {
  4883. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4884. model.layers[il].ffn_norm, NULL,
  4885. LLM_NORM_RMS, cb, il);
  4886. cb(cur, "ffn_norm", il);
  4887. cur = llm_build_ffn(ctx0, cur,
  4888. model.layers[il].ffn_up, NULL,
  4889. model.layers[il].ffn_gate, NULL,
  4890. model.layers[il].ffn_down, NULL,
  4891. NULL,
  4892. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4893. cb(cur, "ffn_out", il);
  4894. }
  4895. cur = ggml_add(ctx0, cur, ffn_inp);
  4896. cb(cur, "l_out", il);
  4897. // input for next layer
  4898. inpL = cur;
  4899. }
  4900. cur = inpL;
  4901. cur = llm_build_norm(ctx0, cur, hparams,
  4902. model.output_norm, NULL,
  4903. LLM_NORM_RMS, cb, -1);
  4904. cb(cur, "result_norm", -1);
  4905. // lm_head
  4906. cur = ggml_mul_mat(ctx0, model.output, cur);
  4907. cb(cur, "result_output", -1);
  4908. ggml_build_forward_expand(gf, cur);
  4909. return gf;
  4910. }
  4911. struct ggml_cgraph * build_bert() {
  4912. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4913. const int64_t n_embd_head = hparams.n_embd_head_v;
  4914. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4915. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4916. struct ggml_tensor * cur;
  4917. struct ggml_tensor * inpL;
4918. // get input views with the right size
  4919. const size_t stride1 = n_tokens * ggml_type_size(lctx.inp_tokens->type);
  4920. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  4921. struct ggml_tensor * inp_sum = ggml_view_2d(ctx0, lctx.inp_sum, n_tokens, n_tokens, stride1, 0);
  4922. // construct input embeddings (token, type, position)
  4923. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  4924. // token types are hardcoded to zero ("Sentence A")
  4925. struct ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
  4926. inpL = ggml_add(ctx0, inpL, type_row0);
  4927. if (model.arch == LLM_ARCH_BERT) {
  4928. inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL);
  4929. }
  4930. cb(inpL, "inp_embd", -1);
  4931. // embed layer norm
  4932. inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, LLM_NORM, cb, -1);
  4933. cb(inpL, "inp_norm", -1);
  4934. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4935. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  4936. cb(KQ_mask, "KQ_mask", -1); // [n_kv, n_tokens]
  4937. // iterate layers
  4938. for (int il = 0; il < n_layer; ++il) {
  4939. struct ggml_tensor * cur = inpL;
  4940. // self-attention
  4941. if (model.arch == LLM_ARCH_BERT) {
  4942. struct ggml_tensor * Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), model.layers[il].bq);
  4943. cb(Qcur, "Qcur", il);
  4944. struct ggml_tensor * Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), model.layers[il].bk);
  4945. cb(Kcur, "Kcur", il);
  4946. struct ggml_tensor * Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, cur), model.layers[il].bv);
  4947. cb(Vcur, "Vcur", il);
4948. // only Qcur needs the 3D [n_embd_head, n_head, n_tokens] reshape here; Kcur/Vcur are stored flat by llm_build_kv_store
  4949. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4950. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  4951. model.layers[il].wo, model.layers[il].bo,
  4952. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4953. cb(cur, "kqv_out", il);
  4954. } else {
  4955. // compute Q and K and RoPE them
  4956. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4957. cb(cur, "wqkv", il);
  4958. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4959. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4960. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4961. cb(Qcur, "Qcur", il);
  4962. cb(Kcur, "Kcur", il);
  4963. cb(Vcur, "Vcur", il);
  4964. Qcur = ggml_rope_custom(
  4965. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  4966. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4967. ext_factor, attn_factor, beta_fast, beta_slow
  4968. );
  4969. cb(Qcur, "Qcur", il);
  4970. Kcur = ggml_rope_custom(
  4971. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  4972. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4973. ext_factor, attn_factor, beta_fast, beta_slow
  4974. );
  4975. cb(Kcur, "Kcur", il);
  4976. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  4977. model.layers[il].wo, model.layers[il].bo,
  4978. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4979. cb(cur, "kqv_out", il);
  4980. }
  4981. // re-add the layer input
  4982. cur = ggml_add(ctx0, cur, inpL);
  4983. // attention layer norm
  4984. cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);
  4985. struct ggml_tensor * ffn_inp = cur;
  4986. cb(ffn_inp, "ffn_inp", il);
  4987. // feed-forward network
  4988. if (model.arch == LLM_ARCH_BERT) {
  4989. cur = llm_build_ffn(ctx0, cur,
  4990. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4991. NULL, NULL,
  4992. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4993. NULL,
  4994. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4995. } else {
  4996. cur = llm_build_ffn(ctx0, cur,
  4997. model.layers[il].ffn_up, NULL,
  4998. model.layers[il].ffn_gate, NULL,
  4999. model.layers[il].ffn_down, NULL,
  5000. NULL,
  5001. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  5002. }
  5003. cb(cur, "ffn_out", il);
5004. // residual connection: the attention output bypasses the intermediate (FFN) layer
  5005. cur = ggml_add(ctx0, cur, ffn_inp);
  5006. // output layer norm
  5007. cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, cb, il);
  5008. // input for next layer
  5009. inpL = cur;
  5010. }
  5011. // final output
  5012. cur = inpL;
  5013. // pooling layer
  5014. if (do_pooling) {
  5015. cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, cur)), inp_sum);
  5016. }
  5017. cb(cur, "result_embd", -1);
  5018. ggml_build_forward_expand(gf, cur);
  5019. return gf;
  5020. }
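// build_bert returns token embeddings ("result_embd") instead of logits. With do_pooling
// set, the final matmul against inp_sum performs mean pooling: for a single sequence each
// output column is the average of the token embeddings, roughly
//
//   pooled[e] = (1/n_tokens) * sum_t embd[t][e]
//
// (inp_sum is expected to hold the 1/n_tokens weights; handling of multiple sequences
// follows how the caller fills lctx.inp_sum).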
  5021. struct ggml_cgraph * build_bloom() {
  5022. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5023. const int64_t n_embd_head = hparams.n_embd_head_v;
  5024. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5025. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5026. struct ggml_tensor * cur;
  5027. struct ggml_tensor * inpL;
  5028. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5029. cb(inpL, "inp_embd", -1);
  5030. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5031. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5032. cb(KQ_mask, "KQ_mask", -1);
  5033. inpL = llm_build_norm(ctx0, inpL, hparams,
  5034. model.tok_norm,
  5035. model.tok_norm_b,
  5036. LLM_NORM, cb, -1);
  5037. cb(inpL, "inp_norm", -1);
  5038. for (int il = 0; il < n_layer; ++il) {
  5039. cur = llm_build_norm(ctx0, inpL, hparams,
  5040. model.layers[il].attn_norm,
  5041. model.layers[il].attn_norm_b,
  5042. LLM_NORM, cb, il);
  5043. cb(cur, "attn_norm", il);
  5044. // self-attention
  5045. {
  5046. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  5047. cb(cur, "wqkv", il);
  5048. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5049. cb(cur, "bqkv", il);
  5050. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5051. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5052. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5053. cb(Qcur, "Qcur", il);
  5054. cb(Kcur, "Kcur", il);
  5055. cb(Vcur, "Vcur", il);
  5056. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5057. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5058. model.layers[il].wo, model.layers[il].bo,
  5059. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5060. cb(cur, "kqv_out", il);
  5061. }
  5062. // Add the input
  5063. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5064. cb(ffn_inp, "ffn_inp", il);
  5065. // FF
  5066. {
  5067. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5068. model.layers[il].ffn_norm,
  5069. model.layers[il].ffn_norm_b,
  5070. LLM_NORM, cb, il);
  5071. cb(cur, "ffn_norm", il);
  5072. cur = llm_build_ffn(ctx0, cur,
  5073. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  5074. NULL, NULL,
  5075. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  5076. NULL,
  5077. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  5078. cb(cur, "ffn_out", il);
  5079. }
  5080. inpL = ggml_add(ctx0, cur, ffn_inp);
  5081. cb(inpL, "l_out", il);
  5082. }
  5083. cur = llm_build_norm(ctx0, inpL, hparams,
  5084. model.output_norm,
  5085. model.output_norm_b,
  5086. LLM_NORM, cb, -1);
  5087. cb(cur, "result_norm", -1);
  5088. cur = ggml_mul_mat(ctx0, model.output, cur);
  5089. cb(cur, "result_output", -1);
  5090. ggml_build_forward_expand(gf, cur);
  5091. return gf;
  5092. }
  5093. struct ggml_cgraph * build_mpt() {
  5094. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5095. const int64_t n_embd_head = hparams.n_embd_head_v;
  5096. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5097. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5098. struct ggml_tensor * cur;
  5099. struct ggml_tensor * inpL;
  5100. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5101. cb(inpL, "inp_embd", -1);
  5102. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5103. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5104. cb(KQ_mask, "KQ_mask", -1);
  5105. for (int il = 0; il < n_layer; ++il) {
  5106. struct ggml_tensor * attn_norm;
  5107. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  5108. model.layers[il].attn_norm,
  5109. NULL,
  5110. LLM_NORM, cb, il);
  5111. cb(attn_norm, "attn_norm", il);
  5112. // self-attention
  5113. {
  5114. cur = attn_norm;
  5115. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  5116. cb(cur, "wqkv", il);
  5117. if (hparams.f_clamp_kqv > 0.0f) {
  5118. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  5119. cb(cur, "wqkv_clamped", il);
  5120. }
  5121. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5122. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5123. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5124. cb(Qcur, "Qcur", il);
  5125. cb(Kcur, "Kcur", il);
  5126. cb(Vcur, "Vcur", il);
  5127. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5128. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5129. model.layers[il].wo, NULL,
  5130. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, hparams.f_max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5131. cb(cur, "kqv_out", il);
  5132. }
  5133. // Add the input
  5134. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5135. cb(ffn_inp, "ffn_inp", il);
  5136. // feed forward
  5137. {
  5138. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5139. model.layers[il].ffn_norm,
  5140. NULL,
  5141. LLM_NORM, cb, il);
  5142. cb(cur, "ffn_norm", il);
  5143. cur = llm_build_ffn(ctx0, cur,
  5144. model.layers[il].ffn_up, NULL,
  5145. NULL, NULL,
  5146. model.layers[il].ffn_down, NULL,
  5147. model.layers[il].ffn_act,
  5148. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  5149. cb(cur, "ffn_out", il);
  5150. }
  5151. cur = ggml_add(ctx0, cur, ffn_inp);
  5152. cb(cur, "l_out", il);
  5153. // input for next layer
  5154. inpL = cur;
  5155. }
  5156. cur = inpL;
  5157. cur = llm_build_norm(ctx0, cur, hparams,
  5158. model.output_norm,
  5159. NULL,
  5160. LLM_NORM, cb, -1);
  5161. cb(cur, "result_norm", -1);
  5162. cur = ggml_mul_mat(ctx0, model.output, cur);
  5163. cb(cur, "result_output", -1);
  5164. ggml_build_forward_expand(gf, cur);
  5165. return gf;
  5166. }
  5167. struct ggml_cgraph * build_stablelm() {
  5168. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  5169. const int64_t n_embd_head = hparams.n_embd_head_v;
  5170. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5171. struct ggml_tensor * cur;
  5172. struct ggml_tensor * inpL;
  5173. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5174. cb(inpL, "inp_embd", -1);
  5175. // inp_pos - contains the positions
  5176. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5177. cb(inp_pos, "inp_pos", -1);
  5178. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5179. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5180. cb(KQ_mask, "KQ_mask", -1);
  5181. // shift the entire K-cache if needed
  5182. if (do_rope_shift) {
  5183. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
  5184. }
  5185. for (int il = 0; il < n_layer; ++il) {
  5186. struct ggml_tensor * inpSA = inpL;
  5187. // norm
  5188. cur = llm_build_norm(ctx0, inpL, hparams,
  5189. model.layers[il].attn_norm,
  5190. model.layers[il].attn_norm_b,
  5191. LLM_NORM, cb, il);
  5192. cb(cur, "attn_norm", il);
  5193. // self-attention
  5194. {
  5195. // compute Q and K and RoPE them
  5196. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  5197. cb(Qcur, "Qcur", il);
  5198. if (model.layers[il].bq) {
  5199. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5200. cb(Qcur, "Qcur", il);
  5201. }
  5202. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  5203. cb(Kcur, "Kcur", il);
  5204. if (model.layers[il].bk) {
  5205. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5206. cb(Kcur, "Kcur", il);
  5207. }
  5208. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  5209. cb(Vcur, "Vcur", il);
  5210. if (model.layers[il].bv) {
  5211. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5212. cb(Vcur, "Vcur", il);
  5213. }
  5214. Qcur = ggml_rope_custom(
  5215. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  5216. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5217. ext_factor, attn_factor, beta_fast, beta_slow
  5218. );
  5219. cb(Qcur, "Qcur", il);
  5220. Kcur = ggml_rope_custom(
  5221. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  5222. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5223. ext_factor, attn_factor, beta_fast, beta_slow
  5224. );
  5225. cb(Kcur, "Kcur", il);
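// the ggml_rope_custom calls rotate hparams.n_rot dimensions using the NeoX layout (mode 2);
// the remaining arguments (n_orig_ctx, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow)
// carry the RoPE frequency/extrapolation parameters used for context scaling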
  5226. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5227. model.layers[il].wo, NULL,
  5228. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5229. cb(cur, "kqv_out", il);
  5230. }
  5231. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5232. cb(ffn_inp, "ffn_inp", il);
  5233. // feed-forward network
  5234. {
  5235. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5236. model.layers[il].ffn_norm,
  5237. model.layers[il].ffn_norm_b,
  5238. LLM_NORM, cb, il);
  5239. cb(cur, "ffn_norm", il);
  5240. cur = llm_build_ffn(ctx0, cur,
  5241. model.layers[il].ffn_up, NULL,
  5242. model.layers[il].ffn_gate, NULL,
  5243. model.layers[il].ffn_down, NULL,
  5244. NULL,
  5245. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  5246. cb(cur, "ffn_out", il);
  5247. }
  5248. cur = ggml_add(ctx0, cur, ffn_inp);
  5249. cb(cur, "l_out", il);
  5250. // input for next layer
  5251. inpL = cur;
  5252. }
  5253. cur = inpL;
  5254. cur = llm_build_norm(ctx0, cur, hparams,
  5255. model.output_norm,
  5256. model.output_norm_b,
  5257. LLM_NORM, cb, -1);
  5258. cb(cur, "result_norm", -1);
  5259. // lm_head
  5260. cur = ggml_mul_mat(ctx0, model.output, cur);
  5261. cb(cur, "result_output", -1);
  5262. ggml_build_forward_expand(gf, cur);
  5263. return gf;
  5264. }
  5265. struct ggml_cgraph * build_qwen() {
  5266. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5267. const int64_t n_embd_head = hparams.n_embd_head_v;
  5268. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5269. struct ggml_tensor * cur;
  5270. struct ggml_tensor * inpL;
  5271. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5272. cb(inpL, "inp_embd", -1);
  5273. // inp_pos - contains the positions
  5274. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5275. cb(inp_pos, "inp_pos", -1);
  5276. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5277. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5278. cb(KQ_mask, "KQ_mask", -1);
  5279. // shift the entire K-cache if needed
  5280. if (do_rope_shift) {
  5281. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
  5282. }
  5283. for (int il = 0; il < n_layer; ++il) {
  5284. struct ggml_tensor * inpSA = inpL;
  5285. cur = llm_build_norm(ctx0, inpL, hparams,
  5286. model.layers[il].attn_norm, NULL,
  5287. LLM_NORM_RMS, cb, il);
  5288. cb(cur, "attn_norm", il);
  5289. // self-attention
  5290. {
  5291. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  5292. cb(cur, "wqkv", il);
  5293. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5294. cb(cur, "bqkv", il);
  5295. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5296. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5297. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
  5298. cb(Qcur, "Qcur", il);
  5299. cb(Kcur, "Kcur", il);
  5300. cb(Vcur, "Vcur", il);
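// here Q, K and V are all n_embd wide (no grouped-query split),
// hence the 0, n_embd and 2*n_embd float offsets into each row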
  5301. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5302. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5303. // using mode = 2 for neox mode
  5304. Qcur = ggml_rope_custom(
  5305. ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  5306. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  5307. );
  5308. cb(Qcur, "Qcur", il);
  5309. Kcur = ggml_rope_custom(
  5310. ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  5311. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  5312. );
  5313. cb(Kcur, "Kcur", il);
  5314. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5315. model.layers[il].wo, NULL,
  5316. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5317. cb(cur, "kqv_out", il);
  5318. }
  5319. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5320. cb(ffn_inp, "ffn_inp", il);
5321. // feed-forward network
  5322. {
  5323. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5324. model.layers[il].ffn_norm, NULL,
  5325. LLM_NORM_RMS, cb, il);
  5326. cb(cur, "ffn_norm", il);
  5327. cur = llm_build_ffn(ctx0, cur,
  5328. model.layers[il].ffn_up, NULL,
  5329. model.layers[il].ffn_gate, NULL,
  5330. model.layers[il].ffn_down, NULL,
  5331. NULL,
  5332. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  5333. cb(cur, "ffn_out", il);
  5334. }
  5335. cur = ggml_add(ctx0, cur, ffn_inp);
  5336. cb(cur, "l_out", il);
  5337. // input for next layer
  5338. inpL = cur;
  5339. }
  5340. cur = inpL;
  5341. cur = llm_build_norm(ctx0, cur, hparams,
  5342. model.output_norm, NULL,
  5343. LLM_NORM_RMS, cb, -1);
  5344. cb(cur, "result_norm", -1);
  5345. // lm_head
  5346. cur = ggml_mul_mat(ctx0, model.output, cur);
  5347. cb(cur, "result_output", -1);
  5348. ggml_build_forward_expand(gf, cur);
  5349. return gf;
  5350. }
  5351. struct ggml_cgraph * build_qwen2() {
  5352. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5353. const int64_t n_embd_head = hparams.n_embd_head_v;
  5354. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5355. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5356. struct ggml_tensor * cur;
  5357. struct ggml_tensor * inpL;
  5358. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5359. cb(inpL, "inp_embd", -1);
  5360. // inp_pos - contains the positions
  5361. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5362. cb(inp_pos, "inp_pos", -1);
  5363. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5364. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5365. cb(KQ_mask, "KQ_mask", -1);
  5366. // shift the entire K-cache if needed
  5367. if (do_rope_shift) {
  5368. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
  5369. }
  5370. for (int il = 0; il < n_layer; ++il) {
  5371. struct ggml_tensor * inpSA = inpL;
  5372. // norm
  5373. cur = llm_build_norm(ctx0, inpL, hparams,
  5374. model.layers[il].attn_norm, NULL,
  5375. LLM_NORM_RMS, cb, il);
  5376. cb(cur, "attn_norm", il);
  5377. // self-attention
  5378. {
  5379. // compute Q and K and RoPE them
  5380. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  5381. cb(Qcur, "Qcur", il);
  5382. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5383. cb(Qcur, "Qcur", il);
  5384. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  5385. cb(Kcur, "Kcur", il);
  5386. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5387. cb(Kcur, "Kcur", il);
  5388. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  5389. cb(Vcur, "Vcur", il);
  5390. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5391. cb(Vcur, "Vcur", il);
  5392. // these nodes are added to the graph together so that they are not reordered
  5393. // by doing so, the number of splits in the graph is reduced
  5394. ggml_build_forward_expand(gf, Qcur);
  5395. ggml_build_forward_expand(gf, Kcur);
  5396. ggml_build_forward_expand(gf, Vcur);
  5397. Qcur = ggml_rope_custom(
  5398. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  5399. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5400. ext_factor, attn_factor, beta_fast, beta_slow
  5401. );
  5402. cb(Qcur, "Qcur", il);
  5403. Kcur = ggml_rope_custom(
  5404. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  5405. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5406. ext_factor, attn_factor, beta_fast, beta_slow
  5407. );
  5408. cb(Kcur, "Kcur", il);
  5409. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5410. model.layers[il].wo, model.layers[il].bo,
  5411. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5412. cb(cur, "kqv_out", il);
  5413. }
  5414. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5415. cb(ffn_inp, "ffn_inp", il);
  5416. // feed-forward network
  5417. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5418. model.layers[il].ffn_norm, NULL,
  5419. LLM_NORM_RMS, cb, il);
  5420. cb(cur, "ffn_norm", il);
  5421. cur = llm_build_ffn(ctx0, cur,
  5422. model.layers[il].ffn_up, NULL,
  5423. model.layers[il].ffn_gate, NULL,
  5424. model.layers[il].ffn_down, NULL,
  5425. NULL,
  5426. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  5427. cb(cur, "ffn_out", il);
  5428. cur = ggml_add(ctx0, cur, ffn_inp);
  5429. cb(cur, "l_out", il);
  5430. // input for next layer
  5431. inpL = cur;
  5432. }
  5433. cur = inpL;
  5434. cur = llm_build_norm(ctx0, cur, hparams,
  5435. model.output_norm, NULL,
  5436. LLM_NORM_RMS, cb, -1);
  5437. cb(cur, "result_norm", -1);
  5438. // lm_head
  5439. cur = ggml_mul_mat(ctx0, model.output, cur);
  5440. cb(cur, "result_output", -1);
  5441. ggml_build_forward_expand(gf, cur);
  5442. return gf;
  5443. }
  5444. struct ggml_cgraph * build_phi2() {
  5445. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5446. const int64_t n_embd_head = hparams.n_embd_head_v;
  5447. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5448. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5449. struct ggml_tensor * cur;
  5450. struct ggml_tensor * attn_norm_output;
  5451. struct ggml_tensor * ffn_output;
  5452. struct ggml_tensor * inpL;
  5453. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5454. cb(inpL, "inp_embd", -1);
  5455. // inp_pos - contains the positions
  5456. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5457. cb(inp_pos, "inp_pos", -1);
  5458. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5459. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5460. cb(KQ_mask, "KQ_mask", -1);
  5461. // shift the entire K-cache if needed
  5462. if (do_rope_shift) {
  5463. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
  5464. }
  5465. for (int il = 0; il < n_layer; ++il) {
  5466. attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
  5467. model.layers[il].attn_norm,
  5468. model.layers[il].attn_norm_b,
  5469. LLM_NORM, cb, il);
  5470. cb(attn_norm_output, "attn_norm", il);
  5471. // self-attention
  5472. {
  5473. struct ggml_tensor * Qcur = nullptr;
  5474. struct ggml_tensor * Kcur = nullptr;
  5475. struct ggml_tensor * Vcur = nullptr;
  5476. if (model.layers[il].wqkv) {
  5477. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
  5478. cb(cur, "wqkv", il);
  5479. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5480. cb(cur, "bqkv", il);
  5481. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5482. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5483. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5484. } else {
  5485. Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq);
  5486. Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk);
  5487. Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv);
  5488. }
  5489. cb(Qcur, "Qcur", il);
  5490. cb(Kcur, "Kcur", il);
  5491. cb(Vcur, "Vcur", il);
  5492. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5493. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  5494. Qcur = ggml_rope_custom(
  5495. ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  5496. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  5497. );
  5498. cb(Qcur, "Qcur", il);
  5499. // with phi2, we scale the Q to avoid precision issues
  5500. // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
  5501. Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
  5502. cb(Qcur, "Qcur", il);
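// since Q is already scaled here, llm_build_kv below is called with a KQ scale of 1.0f
// instead of the usual 1.0f/sqrtf(n_embd_head)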
  5503. Kcur = ggml_rope_custom(
  5504. ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  5505. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  5506. );
  5507. cb(Kcur, "Kcur", il);
  5508. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5509. model.layers[il].wo, model.layers[il].bo,
  5510. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f, cb, il);
  5511. cb(cur, "kqv_out", il);
  5512. }
  5513. // FF
  5514. {
  5515. ffn_output = llm_build_ffn(ctx0, attn_norm_output,
  5516. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  5517. NULL, NULL,
  5518. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  5519. NULL,
  5520. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  5521. cb(ffn_output, "ffn_out", il);
  5522. }
  5523. cur = ggml_add(ctx0, cur, ffn_output);
  5524. cb(cur, "l_out", il);
  5525. cur = ggml_add(ctx0, cur, inpL);
  5526. cb(cur, "l_out", il);
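// phi2 uses a parallel residual: attention and FFN both read attn_norm_output,
// and the layer output is kqv_out + ffn_out + inpL rather than two sequential residual blocks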
  5527. inpL = cur;
  5528. }
  5529. cur = llm_build_norm(ctx0, inpL, hparams,
  5530. model.output_norm,
  5531. model.output_norm_b,
  5532. LLM_NORM, cb, -1);
  5533. cb(cur, "result_norm", -1);
  5534. cur = ggml_mul_mat(ctx0, model.output, cur);
  5535. cb(cur, "result_output_no_bias", -1);
  5536. cur = ggml_add(ctx0, cur, model.output_b);
  5537. cb(cur, "result_output", -1);
  5538. ggml_build_forward_expand(gf, cur);
  5539. return gf;
  5540. }
  5541. struct ggml_cgraph * build_plamo() {
  5542. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  5543. const int64_t n_embd_head = hparams.n_embd_head_v;
  5544. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5545. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5546. struct ggml_tensor * cur;
  5547. struct ggml_tensor * inpL;
  5548. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5549. cb(inpL, "inp_embd", -1);
  5550. // inp_pos - contains the positions
  5551. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5552. cb(inp_pos, "inp_pos", -1);
  5553. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5554. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5555. cb(KQ_mask, "KQ_mask", -1);
  5556. // shift the entire K-cache if needed
  5557. if (do_rope_shift) {
  5558. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
  5559. }
  5560. for (int il = 0; il < n_layer; ++il) {
  5561. // norm
  5562. cur = llm_build_norm(ctx0, inpL, hparams,
  5563. model.layers[il].attn_norm, NULL,
  5564. LLM_NORM_RMS, cb, il);
  5565. cb(cur, "attn_norm", il);
  5566. struct ggml_tensor * attention_norm = cur;
  5567. // self-attention
  5568. {
  5569. // compute Q and K and RoPE them
  5570. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  5571. cb(Qcur, "Qcur", il);
  5572. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  5573. cb(Kcur, "Kcur", il);
  5574. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  5575. cb(Vcur, "Vcur", il);
  5576. Qcur = ggml_rope_custom(
  5577. ctx0, ggml_reshape_3d(ctx0, Qcur, hparams.n_rot, n_head, n_tokens), inp_pos,
  5578. n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5579. ext_factor, attn_factor, beta_fast, beta_slow);
  5580. cb(Qcur, "Qcur", il);
  5581. Kcur = ggml_rope_custom(
  5582. ctx0, ggml_reshape_3d(ctx0, Kcur, hparams.n_rot, n_head_kv, n_tokens), inp_pos,
  5583. n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5584. ext_factor, attn_factor, beta_fast, beta_slow);
  5585. cb(Kcur, "Kcur", il);
  5586. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5587. model.layers[il].wo, NULL,
  5588. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5589. cb(cur, "kqv_out", il);
  5590. }
  5591. struct ggml_tensor * sa_out = cur;
  5592. cur = attention_norm;
  5593. // feed-forward network
  5594. {
  5595. cur = llm_build_ffn(ctx0, cur,
  5596. model.layers[il].ffn_up, NULL,
  5597. model.layers[il].ffn_gate, NULL,
  5598. model.layers[il].ffn_down, NULL,
  5599. NULL,
  5600. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  5601. cb(cur, "ffn_out", il);
  5602. }
  5603. cur = ggml_add(ctx0, cur, sa_out);
  5604. cb(cur, "l_out", il);
  5605. cur = ggml_add(ctx0, cur, inpL);
  5606. cb(cur, "l_out", il);
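// plamo also uses a parallel layout: the FFN consumes the same normalized activations as attention,
// and the layer output sums ffn_out + sa_out + inpL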
  5607. // input for next layer
  5608. inpL = cur;
  5609. }
  5610. cur = inpL;
  5611. cur = llm_build_norm(ctx0, cur, hparams,
  5612. model.output_norm, NULL,
  5613. LLM_NORM_RMS, cb, -1);
  5614. cb(cur, "result_norm", -1);
  5615. // lm_head
  5616. cur = ggml_mul_mat(ctx0, model.output, cur);
  5617. cb(cur, "result_output", -1);
  5618. ggml_build_forward_expand(gf, cur);
  5619. return gf;
  5620. }
  5621. struct ggml_cgraph * build_gpt2() {
  5622. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5623. const int64_t n_embd_head = hparams.n_embd_head_v;
  5624. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5625. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5626. struct ggml_tensor * cur;
  5627. struct ggml_tensor * pos;
  5628. struct ggml_tensor * inpL;
  5629. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5630. cb(inpL, "inp_embd", -1);
  5631. // inp_pos - contains the positions
  5632. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5633. cb(inp_pos, "inp_pos", -1);
  5634. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5635. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5636. cb(KQ_mask, "KQ_mask", -1);
  5637. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  5638. cb(pos, "pos_embd", -1);
  5639. inpL = ggml_add(ctx0, inpL, pos);
  5640. cb(inpL, "inpL", -1);
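// gpt2 relies on learned absolute position embeddings (model.pos_embd gathered by inp_pos)
// added to the token embeddings; no RoPE is applied in this graph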
  5641. for (int il = 0; il < n_layer; ++il) {
  5642. cur = llm_build_norm(ctx0, inpL, hparams,
  5643. model.layers[il].attn_norm,
  5644. model.layers[il].attn_norm_b,
  5645. LLM_NORM, cb, il);
  5646. cb(cur, "attn_norm", il);
  5647. // self-attention
  5648. {
  5649. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  5650. cb(cur, "wqkv", il);
  5651. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5652. cb(cur, "bqkv", il);
  5653. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5654. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5655. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5656. cb(Qcur, "Qcur", il);
  5657. cb(Kcur, "Kcur", il);
  5658. cb(Vcur, "Vcur", il);
  5659. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  5660. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5661. model.layers[il].wo, model.layers[il].bo,
  5662. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5663. cb(cur, "kqv_out", il);
  5664. }
  5665. // add the input
  5666. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5667. cb(ffn_inp, "ffn_inp", il);
  5668. // FF
  5669. {
  5670. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5671. model.layers[il].ffn_norm,
  5672. model.layers[il].ffn_norm_b,
  5673. LLM_NORM, cb, il);
  5674. cb(cur, "ffn_norm", il);
  5675. cur = llm_build_ffn(ctx0, cur,
  5676. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  5677. NULL, NULL,
  5678. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  5679. NULL,
  5680. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  5681. cb(cur, "ffn_out", il);
  5682. }
  5683. inpL = ggml_add(ctx0, cur, ffn_inp);
  5684. cb(inpL, "l_out", il);
  5685. }
  5686. cur = llm_build_norm(ctx0, inpL, hparams,
  5687. model.output_norm,
  5688. model.output_norm_b,
  5689. LLM_NORM, cb, -1);
  5690. cb(cur, "result_norm", -1);
  5691. cur = ggml_mul_mat(ctx0, model.output, cur);
  5692. cb(cur, "result_output", -1);
  5693. ggml_build_forward_expand(gf, cur);
  5694. return gf;
  5695. }
  5696. struct ggml_cgraph * build_codeshell() {
  5697. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5698. const int64_t n_embd_head = hparams.n_embd_head_v;
  5699. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  5700. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5701. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5702. struct ggml_tensor * cur;
  5703. struct ggml_tensor * inpL;
  5704. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5705. cb(inpL, "inp_embd", -1);
  5706. // inp_pos - contains the positions
  5707. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5708. cb(inp_pos, "inp_pos", -1);
  5709. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5710. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5711. cb(KQ_mask, "KQ_mask", -1);
  5712. // shift the entire K-cache if needed
  5713. if (do_rope_shift) {
  5714. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
  5715. }
  5716. for (int il = 0; il < n_layer; ++il) {
  5717. cur = llm_build_norm(ctx0, inpL, hparams,
  5718. model.layers[il].attn_norm,
  5719. model.layers[il].attn_norm_b,
  5720. LLM_NORM, cb, il);
  5721. cb(cur, "attn_norm", il);
  5722. // self-attention
  5723. {
  5724. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  5725. cb(cur, "wqkv", il);
  5726. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  5727. cb(cur, "bqkv", il);
  5728. struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  5729. struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  5730. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  5731. cb(tmpq, "tmpq", il);
  5732. cb(tmpk, "tmpk", il);
  5733. cb(Vcur, "Vcur", il);
  5734. struct ggml_tensor * Qcur = ggml_rope_custom(
  5735. ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos,
  5736. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5737. ext_factor, attn_factor, beta_fast, beta_slow
  5738. );
  5739. cb(Qcur, "Qcur", il);
  5740. struct ggml_tensor * Kcur = ggml_rope_custom(
  5741. ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos,
  5742. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5743. ext_factor, attn_factor, beta_fast, beta_slow
  5744. );
  5745. cb(Kcur, "Kcur", il);
  5746. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5747. model.layers[il].wo, model.layers[il].bo,
  5748. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5749. cb(cur, "kqv_out", il);
  5750. }
  5751. // add the input
  5752. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  5753. cb(ffn_inp, "ffn_inp", il);
  5754. // FF
  5755. {
  5756. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5757. model.layers[il].ffn_norm,
  5758. model.layers[il].ffn_norm_b,
  5759. LLM_NORM, cb, il);
  5760. cb(cur, "ffn_norm", il);
  5761. cur = llm_build_ffn(ctx0, cur,
  5762. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  5763. NULL, NULL,
  5764. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  5765. NULL,
  5766. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  5767. cb(cur, "ffn_out", il);
  5768. }
  5769. inpL = ggml_add(ctx0, cur, ffn_inp);
  5770. cb(inpL, "l_out", il);
  5771. }
  5772. cur = llm_build_norm(ctx0, inpL, hparams,
  5773. model.output_norm,
  5774. model.output_norm_b,
  5775. LLM_NORM, cb, -1);
  5776. cb(cur, "result_norm", -1);
  5777. cur = ggml_mul_mat(ctx0, model.output, cur);
  5778. cb(cur, "result_output", -1);
  5779. ggml_build_forward_expand(gf, cur);
  5780. return gf;
  5781. }
  5782. struct ggml_cgraph * build_orion() {
  5783. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5784. const int64_t n_embd_head = hparams.n_embd_head_v;
  5785. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5786. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5787. struct ggml_tensor * cur;
  5788. struct ggml_tensor * inpL;
  5789. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5790. cb(inpL, "inp_embd", -1);
  5791. // inp_pos - contains the positions
  5792. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5793. cb(inp_pos, "inp_pos", -1);
  5794. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5795. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5796. cb(KQ_mask, "KQ_mask", -1);
  5797. // shift the entire K-cache if needed
  5798. if (do_rope_shift) {
  5799. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
  5800. }
  5801. for (int il = 0; il < n_layer; ++il) {
  5802. struct ggml_tensor * inpSA = inpL;
  5803. // norm
  5804. cur = llm_build_norm(ctx0, inpL, hparams,
  5805. model.layers[il].attn_norm, model.layers[il].attn_norm_b,
  5806. LLM_NORM, cb, il);
  5807. cb(cur, "attn_norm", il);
  5808. // self-attention
  5809. {
  5810. // compute Q and K and RoPE them
  5811. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  5812. cb(Qcur, "Qcur", il);
  5813. // if (model.layers[il].bq) {
  5814. // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5815. // cb(Qcur, "Qcur", il);
  5816. // }
  5817. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  5818. cb(Kcur, "Kcur", il);
  5819. // if (model.layers[il].bk) {
  5820. // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5821. // cb(Kcur, "Kcur", il);
  5822. // }
  5823. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  5824. cb(Vcur, "Vcur", il);
  5825. // if (model.layers[il].bv) {
  5826. // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5827. // cb(Vcur, "Vcur", il);
  5828. // }
  5829. Qcur = ggml_rope_custom(
  5830. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  5831. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5832. ext_factor, attn_factor, beta_fast, beta_slow
  5833. );
  5834. cb(Qcur, "Qcur", il);
  5835. Kcur = ggml_rope_custom(
  5836. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  5837. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  5838. ext_factor, attn_factor, beta_fast, beta_slow
  5839. );
  5840. cb(Kcur, "Kcur", il);
  5841. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5842. model.layers[il].wo, NULL,
  5843. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5844. cb(cur, "kqv_out", il);
  5845. }
  5846. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5847. cb(ffn_inp, "ffn_inp", il);
  5848. // feed-forward network
  5849. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5850. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  5851. LLM_NORM, cb, il);
  5852. cb(cur, "ffn_norm", il);
  5853. cur = llm_build_ffn(ctx0, cur,
  5854. model.layers[il].ffn_up, NULL,
  5855. model.layers[il].ffn_gate, NULL,
  5856. model.layers[il].ffn_down, NULL,
  5857. NULL,
  5858. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  5859. cb(cur, "ffn_out", il);
  5860. cur = ggml_add(ctx0, cur, ffn_inp);
  5861. cb(cur, "l_out", il);
  5862. // input for next layer
  5863. inpL = cur;
  5864. }
  5865. cur = inpL;
  5866. cur = llm_build_norm(ctx0, cur, hparams,
  5867. model.output_norm, model.output_norm_b,
  5868. LLM_NORM, cb, -1);
  5869. cb(cur, "result_norm", -1);
  5870. // lm_head
  5871. cur = ggml_mul_mat(ctx0, model.output, cur);
  5872. cb(cur, "result_output", -1);
  5873. ggml_build_forward_expand(gf, cur);
  5874. return gf;
  5875. }
  5876. struct ggml_cgraph * build_internlm2() {
  5877. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5878. const int64_t n_embd_head = hparams.n_embd_head_v;
  5879. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5880. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5881. struct ggml_tensor * cur;
  5882. struct ggml_tensor * inpL;
  5883. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5884. cb(inpL, "inp_embd", -1);
  5885. // inp_pos - contains the positions
  5886. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5887. cb(inp_pos, "inp_pos", -1);
  5888. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5889. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5890. cb(KQ_mask, "KQ_mask", -1);
  5891. // shift the entire K-cache if needed
  5892. if (do_rope_shift) {
  5893. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
  5894. }
  5895. for (int il = 0; il < n_layer; ++il) {
  5896. struct ggml_tensor * inpSA = inpL;
  5897. // norm
  5898. cur = llm_build_norm(ctx0, inpL, hparams,
  5899. model.layers[il].attn_norm, NULL,
  5900. LLM_NORM_RMS, cb, il);
  5901. cb(cur, "attn_norm", il);
  5902. // self-attention
  5903. {
  5904. // compute Q and K and RoPE them
  5905. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  5906. cb(Qcur, "Qcur", il);
  5907. if (model.layers[il].bq) {
  5908. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  5909. cb(Qcur, "Qcur", il);
  5910. }
  5911. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  5912. cb(Kcur, "Kcur", il);
  5913. if (model.layers[il].bk) {
  5914. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  5915. cb(Kcur, "Kcur", il);
  5916. }
  5917. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  5918. cb(Vcur, "Vcur", il);
  5919. if (model.layers[il].bv) {
  5920. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  5921. cb(Vcur, "Vcur", il);
  5922. }
  5923. Qcur = ggml_rope_custom(
  5924. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  5925. hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
  5926. ext_factor, attn_factor, beta_fast, beta_slow
  5927. );
  5928. cb(Qcur, "Qcur", il);
  5929. Kcur = ggml_rope_custom(
  5930. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  5931. hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
  5932. ext_factor, attn_factor, beta_fast, beta_slow
  5933. );
  5934. cb(Kcur, "Kcur", il);
  5935. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  5936. model.layers[il].wo, model.layers[il].bo,
  5937. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  5938. cb(cur, "kqv_out", il);
  5939. }
  5940. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  5941. cb(ffn_inp, "ffn_inp", il);
  5942. // feed-forward network
  5943. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  5944. model.layers[il].ffn_norm, NULL,
  5945. LLM_NORM_RMS, cb, il);
  5946. cb(cur, "ffn_norm", il);
  5947. cur = llm_build_ffn(ctx0, cur,
  5948. model.layers[il].ffn_up, NULL,
  5949. model.layers[il].ffn_gate, NULL,
  5950. model.layers[il].ffn_down, NULL,
  5951. NULL,
  5952. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  5953. cb(cur, "ffn_out", il);
  5954. cur = ggml_add(ctx0, cur, ffn_inp);
  5955. cb(cur, "l_out", il);
  5956. // input for next layer
  5957. inpL = cur;
  5958. }
  5959. cur = inpL;
  5960. cur = llm_build_norm(ctx0, cur, hparams,
  5961. model.output_norm, NULL,
  5962. LLM_NORM_RMS, cb, -1);
  5963. cb(cur, "result_norm", -1);
  5964. // lm_head
  5965. cur = ggml_mul_mat(ctx0, model.output, cur);
  5966. cb(cur, "result_output", -1);
  5967. ggml_build_forward_expand(gf, cur);
  5968. return gf;
  5969. }
  5970. // ref: https://arxiv.org/abs/2203.03466
  5971. // https://github.com/ggerganov/llama.cpp/issues/5276#issuecomment-1925774738
  5972. // based on the original build_llama() function
  5973. struct ggml_cgraph * build_minicpm() {
  5974. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  5975. const int64_t n_embd_head = hparams.n_embd_head_v;
  5976. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  5977. GGML_ASSERT(n_embd_head == hparams.n_rot);
  5978. const int64_t n_embd = hparams.n_embd;
  5979. //TODO: if the model varies, these parameters need to be read from the model
  5980. const int64_t n_embd_base = 256;
  5981. const float scale_embd = 12.0f;
  5982. const float scale_depth = 1.4f;
  5983. struct ggml_tensor * cur;
  5984. struct ggml_tensor * inpL;
  5985. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
  5986. cb(inpL, "inp_embd", -1);
  5987. // scale the input embeddings
  5988. inpL = ggml_scale(ctx0, inpL, scale_embd);
  5989. cb(inpL, "inp_scaled", -1);
  5990. // inp_pos - contains the positions
  5991. struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
  5992. cb(inp_pos, "inp_pos", -1);
  5993. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  5994. struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
  5995. cb(KQ_mask, "KQ_mask", -1);
  5996. // shift the entire K-cache if needed
  5997. if (do_rope_shift) {
  5998. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
  5999. }
  6000. for (int il = 0; il < n_layer; ++il) {
  6001. struct ggml_tensor * inpSA = inpL;
  6002. // norm
  6003. cur = llm_build_norm(ctx0, inpL, hparams,
  6004. model.layers[il].attn_norm, NULL,
  6005. LLM_NORM_RMS, cb, il);
  6006. cb(cur, "attn_norm", il);
  6007. // self-attention
  6008. {
  6009. // compute Q and K and RoPE them
  6010. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  6011. cb(Qcur, "Qcur", il);
  6012. if (model.layers[il].bq) {
  6013. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  6014. cb(Qcur, "Qcur", il);
  6015. }
  6016. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  6017. cb(Kcur, "Kcur", il);
  6018. if (model.layers[il].bk) {
  6019. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  6020. cb(Kcur, "Kcur", il);
  6021. }
  6022. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  6023. cb(Vcur, "Vcur", il);
  6024. if (model.layers[il].bv) {
  6025. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  6026. cb(Vcur, "Vcur", il);
  6027. }
  6028. Qcur = ggml_rope_custom(
  6029. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  6030. hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
  6031. ext_factor, attn_factor, beta_fast, beta_slow
  6032. );
  6033. cb(Qcur, "Qcur", il);
  6034. Kcur = ggml_rope_custom(
  6035. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  6036. hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
  6037. ext_factor, attn_factor, beta_fast, beta_slow
  6038. );
  6039. cb(Kcur, "Kcur", il);
  6040. cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
  6041. model.layers[il].wo, model.layers[il].bo,
  6042. Kcur, Vcur, Qcur, KQ_mask, n_ctx, n_tokens, kv_head, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  6043. cb(cur, "kqv_out", il);
  6044. }
  6045. // scale_res - scale the hidden states for residual connection
  6046. const float scale_res = scale_depth/sqrtf(float(n_layer));
  6047. cur = ggml_scale(ctx0, cur, scale_res);
  6048. cb(cur, "hidden_scaled", -1);
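// with scale_depth = 1.4 this damps each residual branch by 1.4/sqrt(n_layer),
// e.g. roughly 0.22 for a 40-layer model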
  6049. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6050. cb(ffn_inp, "ffn_inp", il);
  6051. // feed-forward network
  6052. {
  6053. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  6054. model.layers[il].ffn_norm, NULL,
  6055. LLM_NORM_RMS, cb, il);
  6056. cb(cur, "ffn_norm", il);
  6057. cur = llm_build_ffn(ctx0, cur,
  6058. model.layers[il].ffn_up, NULL,
  6059. model.layers[il].ffn_gate, NULL,
  6060. model.layers[il].ffn_down, NULL,
  6061. NULL,
  6062. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  6063. cb(cur, "ffn_out", il);
  6064. }
  6065. // scale the hidden states for residual connection
  6066. cur = ggml_scale(ctx0, cur, scale_res);
  6067. cb(cur, "hidden_scaled_ffn", -1);
  6068. cur = ggml_add(ctx0, cur, ffn_inp);
  6069. cb(cur, "l_out", il);
  6070. // input for next layer
  6071. inpL = cur;
  6072. }
  6073. cur = inpL;
  6074. cur = llm_build_norm(ctx0, cur, hparams,
  6075. model.output_norm, NULL,
  6076. LLM_NORM_RMS, cb, -1);
  6077. cb(cur, "result_norm", -1);
  6078. // lm_head scaling
  6079. const float scale_lmhead = float(n_embd_base)/float(n_embd);
  6080. cur = ggml_scale(ctx0, cur, scale_lmhead);
  6081. cb(cur, "lmhead_scaling", -1);
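// the hidden states are shrunk by n_embd_base/n_embd before the output projection,
// which below reuses the token embedding matrix (tied weights), following the references above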
  6082. // lm_head
  6083. cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
  6084. cb(cur, "result_output", -1);
  6085. ggml_build_forward_expand(gf, cur);
  6086. return gf;
  6087. }
  6088. };
  6089. static struct ggml_cgraph * llama_build_graph(
  6090. llama_context & lctx,
  6091. const llama_batch & batch,
  6092. bool worst_case) {
  6093. const auto & model = lctx.model;
  6094. // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
  6095. llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
  6096. if (il >= 0) {
  6097. ggml_format_name(cur, "%s-%d", name, il);
  6098. } else {
  6099. ggml_set_name(cur, name);
  6100. }
  6101. if (!lctx.cparams.offload_kqv) {
  6102. if (strcmp(name, "kqv_merged_cont") == 0) {
  6103. // all nodes between the KV store and the attention output are run on the CPU
  6104. ggml_backend_sched_set_node_backend(lctx.sched, cur, lctx.backend_cpu);
  6105. }
  6106. }
  6107. };
  6108. struct ggml_cgraph * result = NULL;
  6109. struct llm_build_context llm(lctx, batch, cb, worst_case);
  6110. llm.init();
  6111. switch (model.arch) {
  6112. case LLM_ARCH_LLAMA:
  6113. {
  6114. result = llm.build_llama();
  6115. } break;
  6116. case LLM_ARCH_BAICHUAN:
  6117. {
  6118. result = llm.build_baichuan();
  6119. } break;
  6120. case LLM_ARCH_FALCON:
  6121. {
  6122. result = llm.build_falcon();
  6123. } break;
  6124. case LLM_ARCH_STARCODER:
  6125. {
  6126. result = llm.build_starcoder();
  6127. } break;
  6128. case LLM_ARCH_PERSIMMON:
  6129. {
  6130. result = llm.build_persimmon();
  6131. } break;
  6132. case LLM_ARCH_REFACT:
  6133. {
  6134. result = llm.build_refact();
  6135. } break;
  6136. case LLM_ARCH_BERT:
  6137. case LLM_ARCH_NOMIC_BERT:
  6138. {
  6139. result = llm.build_bert();
  6140. } break;
  6141. case LLM_ARCH_BLOOM:
  6142. {
  6143. result = llm.build_bloom();
  6144. } break;
  6145. case LLM_ARCH_MPT:
  6146. {
  6147. result = llm.build_mpt();
  6148. } break;
  6149. case LLM_ARCH_STABLELM:
  6150. {
  6151. result = llm.build_stablelm();
  6152. } break;
  6153. case LLM_ARCH_QWEN:
  6154. {
  6155. result = llm.build_qwen();
  6156. } break;
  6157. case LLM_ARCH_QWEN2:
  6158. {
  6159. result = llm.build_qwen2();
  6160. } break;
  6161. case LLM_ARCH_PHI2:
  6162. {
  6163. result = llm.build_phi2();
  6164. } break;
  6165. case LLM_ARCH_PLAMO:
  6166. {
  6167. result = llm.build_plamo();
  6168. } break;
  6169. case LLM_ARCH_GPT2:
  6170. {
  6171. result = llm.build_gpt2();
  6172. } break;
  6173. case LLM_ARCH_CODESHELL:
  6174. {
  6175. result = llm.build_codeshell();
  6176. } break;
  6177. case LLM_ARCH_ORION:
  6178. {
  6179. result = llm.build_orion();
  6180. } break;
  6181. case LLM_ARCH_INTERNLM2:
  6182. {
  6183. result = llm.build_internlm2();
  6184. } break;
  6185. case LLM_ARCH_MINICPM:
  6186. {
  6187. result = llm.build_minicpm();
  6188. } break;
  6189. default:
  6190. GGML_ASSERT(false);
  6191. }
  6192. llm.free();
  6193. return result;
  6194. }
  6195. static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
  6196. //
  6197. // set input data
  6198. //
  6199. const auto & hparams = lctx.model.hparams;
  6200. const auto & cparams = lctx.cparams;
  6201. const auto & kv_self = lctx.kv_self;
  6202. if (batch.token) {
  6203. const int64_t n_tokens = batch.n_tokens;
  6204. ggml_backend_tensor_set(lctx.inp_tokens, batch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens));
  6205. }
  6206. if (batch.embd) {
  6207. const int64_t n_embd = hparams.n_embd;
  6208. const int64_t n_tokens = batch.n_tokens;
  6209. ggml_backend_tensor_set(lctx.inp_embd, batch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
  6210. }
  6211. if (batch.pos) {
  6212. const int64_t n_tokens = batch.n_tokens;
  6213. ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
  6214. }
  6215. {
  6216. const int64_t n_kv = kv_self.n;
  6217. const int64_t n_tokens = batch.n_tokens;
  6218. assert(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
  6219. float * data = (float *) lctx.inp_KQ_mask->data;
  6220. for (int h = 0; h < 1; ++h) {
  6221. for (int j = 0; j < n_tokens; ++j) {
  6222. const llama_pos pos = batch.pos[j];
  6223. const llama_seq_id seq_id = batch.seq_id[j][0];
  6224. for (int i = 0; i < n_kv; ++i) {
  6225. float f;
  6226. if (!lctx.kv_self.cells[i].has_seq_id(seq_id) ||
  6227. (hparams.causal_attn && lctx.kv_self.cells[i].pos > pos)) {
  6228. f = -INFINITY;
  6229. } else {
  6230. f = 0;
  6231. }
  6232. data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
  6233. }
  6234. }
  6235. }
  6236. }
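// for each token j the row holds 0.0f for KV cells that belong to the same sequence
// (and, with causal attention, are not in the future) and -INFINITY otherwise,
// so masked cells contribute nothing after the softmax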
  6237. {
  6238. assert(ggml_backend_buffer_is_host(lctx.inp_sum->buffer));
  6239. float * data = (float *) lctx.inp_sum->data;
  6240. for (int i = 0; i < batch.n_tokens; ++i) {
  6241. data[i] = 1.0f/float(batch.n_tokens);
  6242. }
  6243. }
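// inp_sum starts out as uniform 1.0f/n_tokens weights; if the model has a pooling layer
// it is overwritten below with an n_tokens x n_tokens selection matrix (see the pooling block at the end)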
  6244. if (kv_self.has_shift) {
  6245. const int64_t n_ctx = cparams.n_ctx;
  6246. assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer));
  6247. int32_t * data = (int32_t *) lctx.inp_K_shift->data;
  6248. for (int i = 0; i < n_ctx; ++i) {
  6249. data[i] = lctx.kv_self.cells[i].delta;
  6250. }
  6251. }
  6252. if (hparams.pooling_layer && cparams.do_pooling) {
  6253. const int64_t n_tokens = batch.n_tokens;
  6254. GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_sum->buffer));
  6255. float * data = (float *) lctx.inp_sum->data;
  6256. memset(lctx.inp_sum->data, 0, batch.n_tokens * batch.n_tokens * ggml_element_size(lctx.inp_sum));
  6257. for (int i = 0; i < n_tokens; ++i) {
  6258. const llama_seq_id seq_id = batch.seq_id[i][0];
  6259. data[seq_id*n_tokens + i] = 1.0f;
  6260. }
  6261. }
  6262. }
  6263. // decode a batch of tokens by evaluating the transformer
  6264. //
  6265. // - lctx: llama context
  6266. // - batch: batch to evaluate
  6267. //
  6268. // return 0 on success
  6269. // return positive int on warning
  6270. // return negative int on error
  6271. //
  6272. static int llama_decode_internal(
  6273. llama_context & lctx,
  6274. llama_batch batch) {
  6275. const uint32_t n_tokens = batch.n_tokens;
  6276. if (n_tokens == 0) {
  6277. LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
  6278. return -1;
  6279. }
  6280. const auto & model = lctx.model;
  6281. const auto & hparams = model.hparams;
  6282. const auto & cparams = lctx.cparams;
  6283. const auto n_batch = cparams.n_batch;
  6284. GGML_ASSERT(n_tokens <= n_batch);
  6285. int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
  6286. GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
  6287. const int64_t t_start_us = ggml_time_us();
  6288. #ifdef GGML_USE_MPI
  6289. // TODO: needs fix after #3228
  6290. GGML_ASSERT(false && "not implemented");
  6291. //ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
  6292. #endif
  6293. GGML_ASSERT(n_threads > 0);
  6294. auto & kv_self = lctx.kv_self;
  6295. const int64_t n_embd = hparams.n_embd;
  6296. const int64_t n_vocab = hparams.n_vocab;
  6297. // helpers for smoother batch API transition
  6298. // after deprecating the llama_eval calls, these will be removed
  6299. std::vector<llama_pos> pos;
  6300. std::vector<int32_t> n_seq_id;
  6301. std::vector<llama_seq_id *> seq_id_arr;
  6302. std::vector<std::vector<llama_seq_id>> seq_id;
  6303. if (batch.pos == nullptr) {
  6304. pos.resize(n_tokens);
  6305. for (uint32_t i = 0; i < n_tokens; i++) {
  6306. pos[i] = batch.all_pos_0 + i*batch.all_pos_1;
  6307. }
  6308. batch.pos = pos.data();
  6309. }
  6310. if (batch.seq_id == nullptr) {
  6311. n_seq_id.resize(n_tokens);
  6312. seq_id.resize(n_tokens);
  6313. seq_id_arr.resize(n_tokens);
  6314. for (uint32_t i = 0; i < n_tokens; i++) {
  6315. n_seq_id[i] = 1;
  6316. seq_id[i].resize(1);
  6317. seq_id[i][0] = batch.all_seq_id;
  6318. seq_id_arr[i] = seq_id[i].data();
  6319. }
  6320. batch.n_seq_id = n_seq_id.data();
  6321. batch.seq_id = seq_id_arr.data();
  6322. }
  6323. // if we have enough unused cells before the current head ->
  6324. // better to start searching from the beginning of the cache, hoping to fill it
  6325. if (kv_self.head > kv_self.used + 2*n_tokens) {
  6326. kv_self.head = 0;
  6327. }
  6328. if (!llama_kv_cache_find_slot(kv_self, batch)) {
  6329. return 1;
  6330. }
  6331. // a heuristic, to avoid attending the full cache if it is not yet utilized
  6332. // after enough generations, the benefit from this heuristic disappears
  6333. // if we start defragmenting the cache, the benefit from this will be more important
  6334. kv_self.n = std::min((int32_t) cparams.n_ctx, std::max(32, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)));
  6335. //kv_self.n = llama_kv_cache_cell_max(kv_self);
  6336. //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);
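// GGML_PAD rounds the highest used cell index up to a multiple of 32,
// e.g. a cell max of 100 yields kv_self.n = 128 (never below 32, never above n_ctx)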
  6337. ggml_backend_sched_reset(lctx.sched);
  6338. ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);
  6339. ggml_cgraph * gf = llama_build_graph(lctx, batch, false);
  6340. // the output is always the last tensor in the graph
  6341. struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
  6342. struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
  6343. if (strcmp(res->name, "result_output") == 0) {
  6344. // the embeddings could be the second to last tensor, or the third to last tensor
  6345. if (strcmp(embeddings->name, "result_norm") != 0) {
  6346. embeddings = gf->nodes[gf->n_nodes - 3];
  6347. GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
  6348. }
  6349. } else if (strcmp(res->name, "result_embd") == 0) {
  6350. embeddings = res;
  6351. res = nullptr;
  6352. } else {
  6353. GGML_ASSERT(false);
  6354. }
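// by convention the graphs end with "result_output" (logits) preceded by "result_norm" (embeddings),
// or with "result_embd" for embedding-only graphs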
  6355. // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
  6356. // for big prompts, if BLAS is enabled, it is better to use only one thread
  6357. // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
  6358. // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
  6359. // we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
  6360. // with the BLAS calls. need a better solution
6361. // MoE special case: this logic applies when hparams.n_expert == 0, i.e. the model is NOT an MoE model. When an MoE
6362. // model is being processed, Accelerate/BLAS is not involved, so capping the thread count would only hurt performance.
  6363. if (n_tokens >= 32 && hparams.n_expert == 0 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
  6364. n_threads = std::min(4, n_threads);
  6365. }
  6366. #ifdef GGML_USE_MPI
  6367. const int64_t n_layer = hparams.n_layer;
  6368. ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
  6369. #endif
  6370. #ifdef GGML_USE_METAL
  6371. if (ggml_backend_is_metal(lctx.backend_metal)) {
  6372. ggml_backend_metal_set_n_cb(lctx.backend_metal, n_threads);
  6373. }
  6374. #endif
  6375. if (lctx.backend_cpu != nullptr) {
  6376. ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads);
  6377. }
  6378. llama_set_inputs(lctx, batch);
  6379. ggml_backend_sched_graph_compute(lctx.sched, gf);
  6380. // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched));
  6381. #ifdef GGML_USE_MPI
  6382. ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
  6383. #endif
  6384. // update the kv ring buffer
  6385. {
  6386. if (kv_self.has_shift) {
  6387. kv_self.has_shift = false;
  6388. for (uint32_t i = 0; i < kv_self.size; ++i) {
  6389. kv_self.cells[i].delta = 0;
  6390. }
  6391. }
  6392. kv_self.head += n_tokens;
  6393. // Ensure kv cache head points to a valid index.
  6394. if (kv_self.head >= kv_self.size) {
  6395. kv_self.head = 0;
  6396. }
  6397. }
  6398. #ifdef GGML_PERF
  6399. // print timing information per ggml operation (for debugging purposes)
  6400. // requires GGML_PERF to be defined
  6401. ggml_graph_print(gf);
  6402. #endif
  6403. // plot the computation graph in dot format (for debugging purposes)
  6404. //if (n_past%100 == 0) {
  6405. // ggml_graph_dump_dot(gf, NULL, "llama.dot");
  6406. //}
  6407. // extract logits
  6408. // TODO: do not compute and extract logits if only embeddings are needed
  6409. // need to update the graphs to skip "result_output"
  6410. if (res) {
  6411. auto & logits_out = lctx.logits;
  6412. #ifndef NDEBUG
  6413. auto & logits_valid = lctx.logits_valid;
  6414. logits_valid.clear();
  6415. logits_valid.resize(n_tokens);
  6416. logits_out.clear();
  6417. #endif
  6418. ggml_backend_t res_backend = ggml_backend_sched_get_node_backend(lctx.sched, res);
  6419. GGML_ASSERT(res_backend != nullptr);
  6420. if (batch.logits) {
  6421. logits_out.resize(n_vocab * n_tokens);
  6422. for (uint32_t i = 0; i < n_tokens; i++) {
  6423. if (batch.logits[i] == 0) {
  6424. continue;
  6425. }
  6426. ggml_backend_tensor_get_async(res_backend, res, logits_out.data() + (n_vocab*i), (n_vocab*i)*sizeof(float), n_vocab*sizeof(float));
  6427. #ifndef NDEBUG
  6428. logits_valid[i] = true;
  6429. #endif
  6430. }
  6431. } else if (lctx.logits_all) {
  6432. logits_out.resize(n_vocab * n_tokens);
  6433. ggml_backend_tensor_get_async(res_backend, res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float));
  6434. #ifndef NDEBUG
  6435. std::fill(logits_valid.begin(), logits_valid.end(), true);
  6436. #endif
  6437. } else {
  6438. logits_out.resize(n_vocab);
  6439. ggml_backend_tensor_get_async(res_backend, res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float));
  6440. #ifndef NDEBUG
  6441. logits_valid[0] = true;
  6442. #endif
  6443. }
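// logits for token i start at float offset n_vocab*i in the result tensor; depending on the batch,
// either the flagged tokens, all tokens, or only the last token are copied out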
  6444. ggml_backend_synchronize(res_backend);
  6445. }
  6446. // extract embeddings
  6447. if (!lctx.embedding.empty()) {
  6448. auto & embedding_out = lctx.embedding;
  6449. const int64_t embd_pos = res ? n_embd * (n_tokens-1) : 0;
  6450. const int64_t embd_size = res ? n_embd : n_embd * n_tokens;
  6451. embedding_out.resize(embd_size);
  6452. ggml_backend_t embeddings_backend = ggml_backend_sched_get_node_backend(lctx.sched, embeddings);
  6453. ggml_backend_tensor_get_async(embeddings_backend, embeddings, embedding_out.data(), embd_pos*sizeof(float), embd_size*sizeof(float));
  6454. ggml_backend_synchronize(embeddings_backend);
  6455. }
  6456. // measure the performance only for the single-token evals
  6457. if (n_tokens == 1) {
  6458. lctx.t_eval_us += ggml_time_us() - t_start_us;
  6459. lctx.n_eval++;
  6460. }
  6461. else if (n_tokens > 1) {
  6462. lctx.t_p_eval_us += ggml_time_us() - t_start_us;
  6463. lctx.n_p_eval += n_tokens;
  6464. }
  6465. // get a more accurate load time, upon first eval
  6466. // TODO: fix this
  6467. if (!lctx.has_evaluated_once) {
  6468. lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
  6469. lctx.has_evaluated_once = true;
  6470. }
  6471. return 0;
  6472. }
  6473. //
  6474. // tokenizer
  6475. //
  6476. static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
  6477. return vocab.type;
  6478. }
  6479. static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
  6480. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL;
  6481. }
  6482. static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
  6483. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNKNOWN;
  6484. }
  6485. static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
  6486. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
  6487. }
  6488. static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
  6489. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
  6490. }
  6491. static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id) {
  6492. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
  6493. }
  6494. static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
  6495. GGML_ASSERT(llama_is_byte_token(vocab, id));
  6496. const auto& token_data = vocab.id_to_token.at(id);
  6497. switch (llama_vocab_get_type(vocab)) {
  6498. case LLAMA_VOCAB_TYPE_SPM: {
  6499. auto buf = token_data.text.substr(3, 2);
  6500. return strtol(buf.c_str(), NULL, 16);
  6501. }
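// SPM byte tokens look like "<0x41>": substr(3, 2) extracts the two hex digits,
// which strtol then parses in base 16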
  6502. case LLAMA_VOCAB_TYPE_BPE: {
  6503. GGML_ASSERT(false);
  6504. return unicode_to_bytes_bpe(token_data.text);
  6505. }
  6506. case LLAMA_VOCAB_TYPE_WPM: {
  6507. GGML_ASSERT(false);
  6508. }
  6509. default:
  6510. GGML_ASSERT(false);
  6511. }
  6512. }
  6513. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
  6514. static const char * hex = "0123456789ABCDEF";
  6515. switch (llama_vocab_get_type(vocab)) {
  6516. case LLAMA_VOCAB_TYPE_SPM: {
  6517. const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
  6518. auto token = vocab.token_to_id.find(buf);
  6519. if (token != vocab.token_to_id.end()) {
  6520. return (*token).second;
  6521. }
  6522. // Try to fall back to just the byte as a string
  6523. const char buf2[2] = { (char)ch, 0 };
  6524. return vocab.token_to_id.at(buf2);
  6525. }
  6526. case LLAMA_VOCAB_TYPE_WPM:
  6527. case LLAMA_VOCAB_TYPE_BPE: {
  6528. return vocab.token_to_id.at(bytes_to_unicode_bpe(ch));
  6529. }
  6530. default:
  6531. GGML_ASSERT(false);
  6532. }
  6533. }
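// Note on the byte handling above (values are illustrative): for SPM vocabs a raw byte maps to its
// "<0xNN>" byte-fallback piece, e.g. llama_byte_to_token(vocab, 'A') first looks up "<0x41>" and,
// failing that, the literal one-character string "A"; for BPE/WPM vocabs the byte is remapped with
// bytes_to_unicode_bpe (the GPT-2 byte-to-unicode table) before the token_to_id lookup.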
  6534. static void llama_escape_whitespace(std::string & text) {
  6535. replace_all(text, " ", "\xe2\x96\x81");
  6536. }
  6537. static void llama_unescape_whitespace(std::string & word) {
  6538. replace_all(word, "\xe2\x96\x81", " ");
  6539. }
  6540. struct llm_symbol {
  6541. using index = int;
  6542. index prev;
  6543. index next;
  6544. const char * text;
  6545. size_t n;
  6546. };
  6547. static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
  6548. // SPM tokenizer
  6549. // original implementation:
  6550. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  6551. struct llm_bigram_spm {
  6552. struct comparator {
  6553. bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
  6554. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  6555. }
  6556. };
  6557. using queue_storage = std::vector<llm_bigram_spm>;
  6558. using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
  6559. llm_symbol::index left;
  6560. llm_symbol::index right;
  6561. float score;
  6562. size_t size;
  6563. };
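// Note on the ordering: with this comparator, std::priority_queue::top() always returns the bigram
// with the highest score, ties broken towards the smaller (leftmost) left index, which is the merge
// applied next in the loop below.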
  6564. struct llm_tokenizer_spm {
  6565. llm_tokenizer_spm(const llama_vocab & vocab) : vocab(vocab) {}
  6566. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  6567. // split string into utf8 chars
  6568. int index = 0;
  6569. size_t offs = 0;
  6570. while (offs < text.size()) {
  6571. llm_symbol sym;
  6572. size_t len = utf8_len(text[offs]);
  6573. sym.text = text.c_str() + offs;
  6574. sym.n = std::min(len, text.size() - offs);
  6575. offs += sym.n;
  6576. sym.prev = index - 1;
  6577. sym.next = offs == text.size() ? -1 : index + 1;
  6578. index++;
  6579. symbols.emplace_back(sym);
  6580. }
  6581. // seed the work queue with all possible 2-character tokens.
  6582. for (size_t i = 1; i < symbols.size(); ++i) {
  6583. try_add_bigram(i - 1, i);
  6584. }
6585. // keep substituting the highest-scoring pairs for as long as we can.
  6586. while (!work_queue.empty()) {
  6587. auto bigram = work_queue.top();
  6588. work_queue.pop();
  6589. auto & left_sym = symbols[bigram.left];
  6590. auto & right_sym = symbols[bigram.right];
  6591. // if one of the symbols already got merged, skip it.
  6592. if (left_sym.n == 0 || right_sym.n == 0 ||
  6593. left_sym.n + right_sym.n != bigram.size) {
  6594. continue;
  6595. }
  6596. // merge the right sym into the left one
  6597. left_sym.n += right_sym.n;
  6598. right_sym.n = 0;
  6599. //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  6600. // remove the right sym from the chain
  6601. left_sym.next = right_sym.next;
  6602. if (right_sym.next >= 0) {
  6603. symbols[right_sym.next].prev = bigram.left;
  6604. }
  6605. // find more substitutions
  6606. try_add_bigram(left_sym.prev, bigram.left);
  6607. try_add_bigram(bigram.left, left_sym.next);
  6608. }
  6609. for (int i = 0; i != -1; i = symbols[i].next) {
  6610. auto & symbol = symbols[i];
  6611. resegment(symbol, output);
  6612. }
  6613. }
  6614. private:
  6615. void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
  6616. auto text = std::string(symbol.text, symbol.n);
  6617. auto token = vocab.token_to_id.find(text);
  6618. // Do we need to support is_unused?
  6619. if (token != vocab.token_to_id.end()) {
  6620. output.push_back((*token).second);
  6621. return;
  6622. }
  6623. const auto p = rev_merge.find(text);
  6624. if (p == rev_merge.end()) {
  6625. // output any symbols that did not form tokens as bytes.
  6626. output.reserve(output.size() + symbol.n);
  6627. for (int j = 0; j < (int)symbol.n; ++j) {
  6628. llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
  6629. output.push_back(token_id);
  6630. }
  6631. return;
  6632. }
  6633. resegment(symbols[p->second.first], output);
  6634. resegment(symbols[p->second.second], output);
  6635. }
  6636. void try_add_bigram(int left, int right) {
  6637. if (left == -1 || right == -1) {
  6638. return;
  6639. }
  6640. const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
  6641. auto token = vocab.token_to_id.find(text);
  6642. if (token == vocab.token_to_id.end()) {
  6643. return;
  6644. }
  6645. if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
  6646. return;
  6647. }
  6648. const auto & tok_data = vocab.id_to_token[(*token).second];
  6649. llm_bigram_spm bigram;
  6650. bigram.left = left;
  6651. bigram.right = right;
  6652. bigram.score = tok_data.score;
  6653. bigram.size = text.size();
  6654. work_queue.push(bigram);
  6655. // Do we need to support is_unused?
  6656. rev_merge[text] = std::make_pair(left, right);
  6657. }
  6658. const llama_vocab & vocab;
  6659. std::vector<llm_symbol> symbols;
  6660. llm_bigram_spm::queue work_queue;
  6661. std::map<std::string, std::pair<int, int>> rev_merge;
  6662. };
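// Illustrative usage sketch (the actual call site is in llama_tokenize_internal further below;
// 'vocab' stands for an already loaded llama_vocab):
//
//   std::string text = " Hello world";
//   llama_escape_whitespace(text);          // ' ' -> U+2581, the marker SPM models are trained on
//   llm_tokenizer_spm tokenizer(vocab);
//   std::vector<llama_vocab::id> ids;
//   tokenizer.tokenize(text, ids);          // ids now holds the SPM token ids for " Hello world"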
  6663. // BPE tokenizer
  6664. // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
  6665. // tried to simplify unicode stuff, so most likely does not work 100% correctly!
  6666. // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
  6667. struct llm_bigram_bpe {
  6668. struct comparator {
  6669. bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
  6670. return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
  6671. }
  6672. };
  6673. using queue_storage = std::vector<llm_bigram_bpe>;
  6674. using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
  6675. llm_symbol::index left;
  6676. llm_symbol::index right;
  6677. std::string text;
  6678. int rank;
  6679. size_t size;
  6680. };
  6681. struct llm_tokenizer_bpe {
  6682. llm_tokenizer_bpe(const llama_vocab & vocab): vocab(vocab) {}
  6683. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  6684. int final_prev_index = -1;
  6685. auto word_collection = bpe_gpt2_preprocess(text);
  6686. symbols_final.clear();
  6687. for (auto & word : word_collection) {
  6688. work_queue = llm_bigram_bpe::queue();
  6689. symbols.clear();
  6690. int index = 0;
  6691. size_t offset = 0;
  6692. while (offset < word.size()) {
  6693. llm_symbol sym;
  6694. size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
  6695. sym.text = word.c_str() + offset;
  6696. sym.n = char_len;
  6697. offset += sym.n;
  6698. sym.prev = index - 1;
  6699. sym.next = offset == word.size() ? -1 : index + 1;
  6700. index++;
  6701. symbols.emplace_back(sym);
  6702. }
  6703. for (size_t i = 1; i < symbols.size(); ++i) {
  6704. add_new_bigram(i - 1, i);
  6705. }
  6706. // build token(s)
  6707. while (!work_queue.empty()) {
  6708. auto bigram = work_queue.top();
  6709. work_queue.pop();
  6710. auto & left_symbol = symbols[bigram.left];
  6711. auto & right_symbol = symbols[bigram.right];
  6712. if (left_symbol.n == 0 || right_symbol.n == 0) {
  6713. continue;
  6714. }
  6715. std::string left_token = std::string(left_symbol.text, left_symbol.n);
  6716. std::string right_token = std::string(right_symbol.text, right_symbol.n);
  6717. if (left_token + right_token != bigram.text) {
  6718. continue; // Skip this bigram if it's outdated
  6719. }
  6720. // merge the right sym into the left one
  6721. left_symbol.n += right_symbol.n;
  6722. right_symbol.n = 0;
  6723. // remove the right sym from the chain
  6724. left_symbol.next = right_symbol.next;
  6725. if (right_symbol.next >= 0) {
  6726. symbols[right_symbol.next].prev = bigram.left;
  6727. }
  6728. add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
  6729. add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
  6730. }
6731. // add the finished tokens to the final list, keeping the correct order for next and prev
  6732. for (auto & sym : symbols) {
  6733. if (sym.n > 0) {
  6734. sym.prev = final_prev_index;
  6735. sym.next = -1;
  6736. if (final_prev_index != -1) {
  6737. symbols_final[final_prev_index].next = symbols_final.size();
  6738. }
  6739. symbols_final.emplace_back(sym);
  6740. final_prev_index = symbols_final.size() - 1;
  6741. }
  6742. }
  6743. }
  6744. symbols = symbols_final;
  6745. if (!symbols.empty()) {
  6746. for (int i = 0; i != -1; i = symbols[i].next) {
  6747. auto & symbol = symbols[i];
  6748. if (symbol.n == 0) {
  6749. continue;
  6750. }
  6751. const std::string str = std::string(symbol.text, symbol.n);
  6752. const auto token = vocab.token_to_id.find(str);
  6753. if (token == vocab.token_to_id.end()) {
  6754. for (auto j = str.begin(); j != str.end(); ++j) {
  6755. std::string byte_str(1, *j);
  6756. auto token_multibyte = vocab.token_to_id.find(byte_str);
  6757. if (token_multibyte == vocab.token_to_id.end()) {
  6758. throw std::runtime_error("ERROR: byte not found in vocab");
  6759. }
  6760. output.push_back((*token_multibyte).second);
  6761. }
  6762. } else {
  6763. output.push_back((*token).second);
  6764. }
  6765. }
  6766. }
  6767. }
  6768. private:
  6769. void add_new_bigram(int left, int right) {
  6770. if (left == -1 || right == -1) {
  6771. return;
  6772. }
  6773. std::string left_token = std::string(symbols[left].text, symbols[left].n);
  6774. std::string right_token = std::string(symbols[right].text, symbols[right].n);
  6775. int rank_found = -1;
  6776. rank_found = vocab.find_bpe_rank(left_token, right_token);
  6777. if (rank_found < 0) {
  6778. return;
  6779. }
  6780. llm_bigram_bpe bigram;
  6781. bigram.left = left;
  6782. bigram.right = right;
  6783. bigram.text = left_token + right_token;
  6784. bigram.size = left_token.size() + right_token.size();
  6785. bigram.rank = rank_found;
  6786. work_queue.push(bigram);
  6787. }
  6788. std::vector<std::string> bpe_gpt2_preprocess(const std::string & text) {
  6789. std::vector<std::string> bpe_words;
  6790. std::vector<std::string> bpe_encoded_words;
  6791. std::string token = "";
  6792. // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+
  6793. bool collecting_numeric = false;
  6794. bool collecting_letter = false;
  6795. bool collecting_special = false;
  6796. bool collecting_whitespace_lookahead = false;
  6797. bool collecting = false;
  6798. std::vector<std::string> text_utf;
  6799. text_utf.reserve(text.size());
  6800. bpe_words.reserve(text.size());
  6801. bpe_encoded_words.reserve(text.size());
  6802. auto cps = codepoints_from_utf8(text);
  6803. for (size_t i = 0; i < cps.size(); ++i)
  6804. text_utf.emplace_back(codepoint_to_utf8(cps[i]));
  6805. for (int i = 0; i < (int)text_utf.size(); i++) {
  6806. const std::string & utf_char = text_utf[i];
  6807. bool split_condition = false;
  6808. int bytes_remain = text_utf.size() - i;
  6809. // forward backward lookups
  6810. const std::string & utf_char_next = (i + 1 < (int)text_utf.size()) ? text_utf[i + 1] : "";
  6811. const std::string & utf_char_next_next = (i + 2 < (int)text_utf.size()) ? text_utf[i + 2] : "";
  6812. // handling contractions
  6813. if (!split_condition && bytes_remain >= 2) {
  6814. // 's|'t|'m|'d
  6815. if (utf_char == "\'" && (utf_char_next == "s" || utf_char_next == "t" || utf_char_next == "m" || utf_char_next == "d")) {
  6816. split_condition = true;
  6817. }
  6818. if (split_condition) {
  6819. if (token.size()) {
  6820. bpe_words.emplace_back(token); // push previous content as token
  6821. }
  6822. token = utf_char + utf_char_next;
  6823. bpe_words.emplace_back(token);
  6824. token = "";
  6825. i++;
  6826. continue;
  6827. }
  6828. }
  6829. if (!split_condition && bytes_remain >= 3) {
  6830. // 're|'ve|'ll
  6831. if (utf_char == "\'" && (
  6832. (utf_char_next == "r" && utf_char_next_next == "e") ||
  6833. (utf_char_next == "v" && utf_char_next_next == "e") ||
  6834. (utf_char_next == "l" && utf_char_next_next == "l"))
  6835. ) {
  6836. split_condition = true;
  6837. }
  6838. if (split_condition) {
  6839. // current token + next token can be defined
  6840. if (token.size()) {
  6841. bpe_words.emplace_back(token); // push previous content as token
  6842. }
  6843. token = utf_char + utf_char_next + utf_char_next_next;
  6844. bpe_words.emplace_back(token); // the contraction
  6845. token = "";
  6846. i += 2;
  6847. continue;
  6848. }
  6849. }
  6850. if (!split_condition && !collecting) {
  6851. if (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER)) {
  6852. collecting_letter = true;
  6853. collecting = true;
  6854. }
  6855. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  6856. collecting_numeric = true;
  6857. collecting = true;
  6858. }
  6859. else if (
  6860. ((codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) && (codepoint_type(utf_char) != CODEPOINT_TYPE_WHITESPACE)) ||
  6861. (!token.size() && utf_char == " " && codepoint_type(utf_char_next) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char_next) != CODEPOINT_TYPE_DIGIT && codepoint_type(utf_char_next) != CODEPOINT_TYPE_WHITESPACE)
  6862. ) {
  6863. collecting_special = true;
  6864. collecting = true;
  6865. }
  6866. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE && codepoint_type(utf_char_next) == CODEPOINT_TYPE_WHITESPACE) {
  6867. collecting_whitespace_lookahead = true;
  6868. collecting = true;
  6869. }
  6870. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE) {
  6871. split_condition = true;
  6872. }
  6873. }
  6874. else if (!split_condition && collecting) {
  6875. if (collecting_letter && codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER) {
  6876. split_condition = true;
  6877. }
  6878. else if (collecting_numeric && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) {
  6879. split_condition = true;
  6880. }
  6881. else if (collecting_special && (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE)) {
  6882. split_condition = true;
  6883. }
  6884. else if (collecting_whitespace_lookahead && (codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  6885. split_condition = true;
  6886. }
  6887. }
  6888. if (utf_char_next == "") {
  6889. split_condition = true; // final
  6890. token += utf_char;
  6891. }
  6892. if (split_condition) {
  6893. if (token.size()) {
  6894. bpe_words.emplace_back(token);
  6895. }
  6896. token = utf_char;
  6897. collecting = false;
  6898. collecting_letter = false;
  6899. collecting_numeric = false;
  6900. collecting_special = false;
  6901. collecting_whitespace_lookahead = false;
  6902. }
  6903. else {
  6904. token += utf_char;
  6905. }
  6906. }
  6907. for (std::string & word : bpe_words) {
  6908. std::string encoded_token = "";
  6909. for (char & c : word) {
  6910. encoded_token += bytes_to_unicode_bpe(c);
  6911. }
  6912. bpe_encoded_words.emplace_back(encoded_token);
  6913. }
  6914. return bpe_encoded_words;
  6915. }
  6916. const llama_vocab & vocab;
  6917. std::vector<llm_symbol> symbols;
  6918. std::vector<llm_symbol> symbols_final;
  6919. llm_bigram_bpe::queue work_queue;
  6920. };
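// Worked example (the merge ranks are made up): for a pre-tokenized word "lower" with ranks
//   ("l","o") -> 0, ("lo","w") -> 1, ("e","r") -> 2
// the queue pops the lowest-rank pair first, so the symbols evolve as
//   "l" "o" "w" "e" "r"  ->  "lo" "w" "e" "r"  ->  "low" "e" "r"  ->  "low" "er"
// and each surviving symbol is then looked up in vocab.token_to_id (falling back to per-byte
// tokens if a piece is missing from the vocab).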
  6921. struct llm_tokenizer_wpm {
  6922. llm_tokenizer_wpm(const llama_vocab & vocab): vocab(vocab) {}
  6923. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  6924. auto * token_map = &vocab.token_to_id;
  6925. // normalize and split by whitespace
  6926. std::vector<std::string> words = preprocess(text);
  6927. // bos token prepended already
  6928. // find the longest tokens that form the words
  6929. for (const std::string &word : words) {
  6930. // skip empty words
  6931. if (word.size() == 0) {
  6932. continue;
  6933. }
  6934. // prepend phantom space
  6935. std::string word1 = "\xe2\x96\x81" + word;
  6936. int n = word1.size();
  6937. // we're at the start of a new word
  6938. int i = 0;
  6939. bool match_any = false;
  6940. // move through character position in word
  6941. while (i < n) {
  6942. // loop through possible match length
  6943. bool match = false;
  6944. for (int j = n; j > i; j--) {
  6945. auto it = token_map->find(word1.substr(i, j - i));
  6946. if (it != token_map->end()) {
  6947. output.push_back(it->second);
  6948. match = true;
  6949. match_any = true;
  6950. i = j;
  6951. break;
  6952. }
  6953. }
  6954. // must be an unknown character
  6955. if (!match) {
  6956. i++;
  6957. }
  6958. }
  6959. // we didn't find any matches for this word
  6960. if (!match_any) {
  6961. output.push_back(vocab.special_unk_id);
  6962. }
  6963. }
  6964. // append eos token
  6965. output.push_back(vocab.special_eos_id);
  6966. }
  6967. std::vector<std::string> preprocess(const std::string & text) {
  6968. std::string ori_str = normalize(text);
  6969. uint64_t ori_size = ori_str.size();
  6970. // single punct / single symbol / single digit
6971. // baseline: add whitespace on the left and right of punct and Chinese characters
  6972. std::vector<std::string> words;
  6973. std::string new_str = "";
  6974. uint64_t i = 0;
  6975. while (i < ori_size) {
  6976. int utf_char_len = utf8_len(ori_str[i]);
  6977. if ((utf_char_len == 1) && ispunct(ori_str[i])) {
  6978. new_str += " ";
  6979. new_str += ori_str[i];
  6980. new_str += " ";
  6981. i += 1;
  6982. }
  6983. else if ((utf_char_len == 3) && is_chinese_char(ori_str.substr(i, 3))) {
  6984. new_str += " ";
  6985. new_str += ori_str.substr(i, 3);
  6986. new_str += " ";
  6987. i += 3;
  6988. }
  6989. else {
  6990. new_str += ori_str[i];
  6991. i += 1;
  6992. }
  6993. }
  6994. // split by whitespace
  6995. uint64_t l = 0;
  6996. uint64_t r = 0;
  6997. while (r < new_str.size()) {
  6998. // if is whitespace
  6999. if (isspace(new_str[r])) {
  7000. if (r > l) words.push_back(new_str.substr(l, (r - l)));
  7001. l = r + 1;
  7002. r = l;
  7003. }
  7004. else {
  7005. r += 1;
  7006. }
  7007. }
  7008. if (r > l) {
  7009. words.push_back(new_str.substr(l, (r - l)));
  7010. }
  7011. return words;
  7012. }
  7013. std::string normalize(const std::string & text) {
  7014. // TODO: handle chinese characters? https://github.com/huggingface/tokenizers/blob/ef5f50605ddf9f8caef1598c0e4853862b9707a7/tokenizers/src/normalizers/bert.rs#L98
  7015. std::string text2 = strip_accents(text);
  7016. for (size_t i = 0; i < text2.size(); i += utf8_len(text2[i])) {
  7017. char c = text2[i];
  7018. if (c >= 'A' && c <= 'Z') {
  7019. text2[i] = c - 'A' + 'a';
  7020. }
  7021. }
  7022. return text2;
  7023. }
  7024. bool is_chinese_char(const std::string & str) {
  7025. int len = str.length();
  7026. unsigned int codepoint = 0;
  7027. int num_bytes = 0;
  7028. int i = 0;
  7029. unsigned char ch = static_cast<unsigned char>(str[i]);
  7030. if (ch <= 0x7f) {
  7031. codepoint = ch;
  7032. num_bytes = 1;
  7033. } else if ((ch >> 5) == 0x06) {
  7034. codepoint = ch & 0x1f;
  7035. num_bytes = 2;
  7036. } else if ((ch >> 4) == 0x0e) {
  7037. codepoint = ch & 0x0f;
  7038. num_bytes = 3;
  7039. } else if ((ch >> 3) == 0x1e) {
  7040. codepoint = ch & 0x07;
  7041. num_bytes = 4;
  7042. }
  7043. for (int j = 1; j < num_bytes; ++j) {
  7044. if (i + j >= len) {
  7045. return false; // incomplete UTF-8 character
  7046. }
  7047. unsigned char next_ch = static_cast<unsigned char>(str[i + j]);
  7048. if ((next_ch >> 6) != 0x02) {
  7049. return false; // invalid trailing byte
  7050. }
  7051. codepoint = (codepoint << 6) | (next_ch & 0x3f);
  7052. }
  7053. if ((codepoint >= 0x4E00 && codepoint <= 0x9FFF) ||
  7054. (codepoint >= 0x3400 && codepoint <= 0x4DBF) ||
  7055. (codepoint >= 0x20000 && codepoint <= 0x2A6DF) ||
  7056. (codepoint >= 0x2A700 && codepoint <= 0x2B73F) ||
  7057. (codepoint >= 0x2B740 && codepoint <= 0x2B81F) ||
  7058. (codepoint >= 0x2B920 && codepoint <= 0x2CEAF) || // this should be 0x2B820 but in hf rust code it is 0x2B920
  7059. (codepoint >= 0xF900 && codepoint <= 0xFAFF) ||
  7060. (codepoint >= 0x2F800 && codepoint <= 0x2FA1F) ||
  7061. (codepoint >= 0x3000 && codepoint <= 0x303F) ||
  7062. (codepoint >= 0xFF00 && codepoint <= 0xFFEF)) {
  7063. return true; // NOLINT
  7064. }
  7065. return false;
  7066. }
  7067. std::string strip_accents(const std::string & input_string) {
  7068. std::string resultString;
  7069. std::map<std::string, char> accent_map = {
  7070. {"À", 'A'}, {"Á", 'A'}, {"Â", 'A'}, {"Ã", 'A'}, {"Ä", 'A'}, {"Å", 'A'},
  7071. {"à", 'a'}, {"á", 'a'}, {"â", 'a'}, {"ã", 'a'}, {"ä", 'a'}, {"å", 'a'},
  7072. {"È", 'E'}, {"É", 'E'}, {"Ê", 'E'}, {"Ë", 'E'}, {"è", 'e'}, {"é", 'e'},
  7073. {"ê", 'e'}, {"ë", 'e'}, {"Ì", 'I'}, {"Í", 'I'}, {"Î", 'I'}, {"Ï", 'I'},
  7074. {"ì", 'i'}, {"í", 'i'}, {"î", 'i'}, {"ï", 'i'}, {"Ò", 'O'}, {"Ó", 'O'},
  7075. {"Ô", 'O'}, {"Õ", 'O'}, {"Ö", 'O'}, {"ò", 'o'}, {"ó", 'o'}, {"ô", 'o'},
  7076. {"õ", 'o'}, {"ö", 'o'}, {"Ù", 'U'}, {"Ú", 'U'}, {"Û", 'U'}, {"Ü", 'U'},
  7077. {"ù", 'u'}, {"ú", 'u'}, {"û", 'u'}, {"ü", 'u'}, {"Ý", 'Y'}, {"ý", 'y'},
  7078. {"Ç", 'C'}, {"ç", 'c'}, {"Ñ", 'N'}, {"ñ", 'n'},
  7079. };
  7080. for (size_t i = 0; i < input_string.length();) {
  7081. int len = utf8_len(input_string[i]);
  7082. std::string curChar = input_string.substr(i, len);
  7083. auto iter = accent_map.find(curChar);
  7084. if (iter != accent_map.end()) {
  7085. resultString += iter->second;
  7086. } else {
  7087. resultString += curChar;
  7088. }
  7089. i += len;
  7090. }
  7091. return resultString;
  7092. }
  7093. static size_t utf8_len(char src) {
  7094. const size_t lookup[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4};
  7095. uint8_t highbits = static_cast<uint8_t>(src) >> 4;
  7096. return lookup[highbits];
  7097. }
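// The lookup table above is indexed by the high nibble of the lead byte: 0x0-0x7 (ASCII) and
// 0x8-0xB (continuation bytes) map to 1, 0xC-0xD to 2, 0xE to 3 and 0xF to 4; e.g.
// utf8_len('\xE4'), the lead byte of many CJK characters, returns 3.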
  7098. const llama_vocab & vocab;
  7099. };
  7100. typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
  7101. FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
  7102. FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
  7103. } FRAGMENT_BUFFER_VARIANT_TYPE;
  7104. struct fragment_buffer_variant {
  7105. fragment_buffer_variant(llama_vocab::id _token)
  7106. :
  7107. type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
  7108. token(_token),
  7109. raw_text(_dummy),
  7110. offset(0),
  7111. length(0) {}
  7112. fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
  7113. :
  7114. type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
  7115. token((llama_vocab::id) - 1),
  7116. raw_text(_raw_text),
  7117. offset(_offset),
  7118. length(_length){
  7119. GGML_ASSERT(_offset >= 0);
  7120. GGML_ASSERT(_length >= 1);
  7121. GGML_ASSERT(offset + length <= raw_text.length());
  7122. }
  7123. const FRAGMENT_BUFFER_VARIANT_TYPE type;
  7124. const llama_vocab::id token;
  7125. const std::string _dummy;
  7126. const std::string & raw_text;
  7127. const uint64_t offset;
  7128. const uint64_t length;
  7129. };
  7130. // #define PRETOKENIZERDEBUG
  7131. static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
  7132. // for each special token
  7133. for (const auto & st: vocab.special_tokens_cache) {
  7134. const auto & special_token = st.first;
  7135. const auto & special_id = st.second;
  7136. // for each text fragment
  7137. std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
  7138. while (it != buffer.end()) {
  7139. auto & fragment = (*it);
  7140. // if a fragment is text ( not yet processed )
  7141. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  7142. auto * raw_text = &(fragment.raw_text);
  7143. auto raw_text_base_offset = fragment.offset;
  7144. auto raw_text_base_length = fragment.length;
  7145. // loop over the text
  7146. while (true) {
  7147. // find the first occurrence of a given special token in this fragment
7148. // passing the offset argument only limits the "search area", but match coordinates
7149. // are still relative to the full source raw_text
  7150. auto match = raw_text->find(special_token, raw_text_base_offset);
  7151. // no occurrences found, stop processing this fragment for a given special token
  7152. if (match == std::string::npos) break;
  7153. // check if match is within bounds of offset <-> length
  7154. if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
  7155. #ifdef PRETOKENIZERDEBUG
  7156. LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  7157. #endif
  7158. auto source = std::distance(buffer.begin(), it);
  7159. // if match is further than base offset
  7160. // then we have some text to the left of it
  7161. if (match > raw_text_base_offset) {
  7162. // left
  7163. const int64_t left_reminder_offset = raw_text_base_offset + 0;
  7164. const int64_t left_reminder_length = match - raw_text_base_offset;
  7165. buffer.emplace_after(it, (*raw_text), left_reminder_offset, left_reminder_length);
  7166. #ifdef PRETOKENIZERDEBUG
  7167. LLAMA_LOG_WARN("FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
  7168. #endif
  7169. it++;
  7170. }
  7171. // special token
  7172. buffer.emplace_after(it, special_id);
  7173. it++;
  7174. // right
  7175. if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
  7176. const int64_t right_reminder_offset = match + special_token.length();
  7177. const int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
  7178. buffer.emplace_after(it, (*raw_text), right_reminder_offset, right_reminder_length);
  7179. #ifdef PRETOKENIZERDEBUG
  7180. LLAMA_LOG_WARN("FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
  7181. #endif
  7182. it++;
  7183. if (source == 0) {
  7184. buffer.erase_after(buffer.before_begin());
  7185. } else {
  7186. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  7187. }
  7188. // repeat for the right side
  7189. raw_text_base_offset = right_reminder_offset;
  7190. raw_text_base_length = right_reminder_length;
  7191. #ifdef PRETOKENIZERDEBUG
  7192. LLAMA_LOG_WARN("RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  7193. #endif
  7194. } else {
  7195. if (source == 0) {
  7196. buffer.erase_after(buffer.before_begin());
  7197. } else {
  7198. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  7199. }
  7200. break;
  7201. }
  7202. }
  7203. }
  7204. it++;
  7205. }
  7206. }
  7207. }
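// Worked example (the special token and its id are made up): with "<s>" registered as special id 1,
// partitioning the single fragment RAW_TEXT("a<s>b") leaves the buffer as
//   RAW_TEXT("a"), TOKEN(1), RAW_TEXT("b")
// so only the plain-text pieces are handed to the SPM/BPE/WPM tokenizers in llama_tokenize_internal.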
  7208. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special) {
  7209. std::vector<llama_vocab::id> output;
  7210. // OG tokenizer behavior:
  7211. //
  7212. // tokenizer.encode('', add_bos=True) returns [1]
  7213. // tokenizer.encode('', add_bos=False) returns []
  7214. if (bos && vocab.special_bos_id != -1) {
  7215. output.push_back(vocab.special_bos_id);
  7216. }
  7217. if (raw_text.empty()) {
  7218. return output;
  7219. }
  7220. std::forward_list<fragment_buffer_variant> fragment_buffer;
  7221. fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
  7222. if (special) tokenizer_st_partition(vocab, fragment_buffer);
  7223. switch (vocab.type) {
  7224. case LLAMA_VOCAB_TYPE_SPM:
  7225. {
  7226. for (const auto & fragment : fragment_buffer) {
  7227. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  7228. // without adding this leading whitespace, we do not get the same results as the original tokenizer
  7229. // TODO: It's likely possible to get rid of this string copy entirely
  7230. // by modifying llm_tokenizer_x to operate with string offsets like pre-tokenizer
  7231. // and passing 'add space prefix' as bool argument
  7232. //
  7233. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  7234. if (&fragment == &fragment_buffer.front()) {
  7235. if (vocab.add_space_prefix) {
  7236. raw_text = " " + raw_text; // prefix with space if the first token is not special
  7237. }
  7238. }
  7239. #ifdef PRETOKENIZERDEBUG
  7240. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  7241. #endif
  7242. llm_tokenizer_spm tokenizer(vocab);
  7243. llama_escape_whitespace(raw_text);
  7244. tokenizer.tokenize(raw_text, output);
  7245. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  7246. output.push_back(fragment.token);
  7247. }
  7248. }
  7249. } break;
  7250. case LLAMA_VOCAB_TYPE_BPE:
  7251. {
  7252. for (const auto & fragment : fragment_buffer) {
  7253. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  7254. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  7255. #ifdef PRETOKENIZERDEBUG
  7256. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  7257. #endif
  7258. llm_tokenizer_bpe tokenizer(vocab);
  7259. tokenizer.tokenize(raw_text, output);
  7260. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  7261. output.push_back(fragment.token);
  7262. }
  7263. }
  7264. } break;
  7265. case LLAMA_VOCAB_TYPE_WPM:
  7266. {
  7267. for (const auto & fragment : fragment_buffer) {
  7268. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  7269. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  7270. #ifdef PRETOKENIZERDEBUG
  7271. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  7272. #endif
  7273. llm_tokenizer_wpm tokenizer(vocab);
  7274. tokenizer.tokenize(raw_text, output);
  7275. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  7276. output.push_back(fragment.token);
  7277. }
  7278. }
  7279. } break;
  7280. }
  7281. return output;
  7282. }
  7283. //
  7284. // grammar - internal
  7285. //
  7286. struct llama_partial_utf8 {
  7287. uint32_t value; // bit value so far (unshifted)
  7288. int n_remain; // num bytes remaining; -1 indicates invalid sequence
  7289. };
  7290. struct llama_grammar {
  7291. const std::vector<std::vector<llama_grammar_element>> rules;
  7292. std::vector<std::vector<const llama_grammar_element *>> stacks;
  7293. // buffer for partially generated UTF-8 sequence from accepted tokens
  7294. llama_partial_utf8 partial_utf8;
  7295. };
  7296. struct llama_grammar_candidate {
  7297. size_t index;
  7298. const uint32_t * code_points;
  7299. llama_partial_utf8 partial_utf8;
  7300. };
  7301. // Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 for use as
  7302. // pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
  7303. static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
  7304. const std::string & src,
  7305. llama_partial_utf8 partial_start) {
  7306. static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
  7307. const char * pos = src.c_str();
  7308. std::vector<uint32_t> code_points;
7309. // common English strings have the same number of codepoints and bytes. `+ 1` for the terminating 0.
  7310. code_points.reserve(src.size() + 1);
  7311. uint32_t value = partial_start.value;
  7312. int n_remain = partial_start.n_remain;
  7313. // continue previous decode, if applicable
  7314. while (*pos != 0 && n_remain > 0) {
  7315. uint8_t next_byte = static_cast<uint8_t>(*pos);
  7316. if ((next_byte >> 6) != 2) {
  7317. // invalid sequence, abort
  7318. code_points.push_back(0);
  7319. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
  7320. }
  7321. value = (value << 6) + (next_byte & 0x3F);
  7322. ++pos;
  7323. --n_remain;
  7324. }
  7325. if (partial_start.n_remain > 0 && n_remain == 0) {
  7326. code_points.push_back(value);
  7327. }
  7328. // decode any subsequent utf-8 sequences, which may end in an incomplete one
  7329. while (*pos != 0) {
  7330. uint8_t first_byte = static_cast<uint8_t>(*pos);
  7331. uint8_t highbits = first_byte >> 4;
  7332. n_remain = lookup[highbits] - 1;
  7333. if (n_remain < 0) {
  7334. // invalid sequence, abort
  7335. code_points.clear();
  7336. code_points.push_back(0);
  7337. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
  7338. }
  7339. uint8_t mask = (1 << (7 - n_remain)) - 1;
  7340. value = first_byte & mask;
  7341. ++pos;
  7342. while (*pos != 0 && n_remain > 0) {
  7343. value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
  7344. ++pos;
  7345. --n_remain;
  7346. }
  7347. if (n_remain == 0) {
  7348. code_points.push_back(value);
  7349. }
  7350. }
  7351. code_points.push_back(0);
  7352. return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
  7353. }
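// Worked example: decoding "\xC3\xA9" ("é"). lookup[0xC] == 2, so n_remain = 1 and
// value = 0xC3 & 0x3F = 0x03; the continuation byte then gives (0x03 << 6) + (0xA9 & 0x3F) = 0xE9,
// i.e. U+00E9. If the input ended after the 0xC3 byte, the function would return
// llama_partial_utf8{ 0x03, 1 } instead, letting the next call finish the code point.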
  7354. // returns true iff pos points to the end of one of the definitions of a rule
  7355. static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
  7356. switch (pos->type) {
  7357. case LLAMA_GRETYPE_END: return true; // NOLINT
  7358. case LLAMA_GRETYPE_ALT: return true; // NOLINT
  7359. default: return false;
  7360. }
  7361. }
  7362. // returns true iff chr satisfies the char range at pos (regular or inverse range)
  7363. // asserts that pos is pointing to a char range element
  7364. static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
  7365. const llama_grammar_element * pos,
  7366. const uint32_t chr) {
  7367. bool found = false;
  7368. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  7369. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT
  7370. do {
  7371. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  7372. // inclusive range, e.g. [a-z]
  7373. found = found || (pos->value <= chr && chr <= pos[1].value);
  7374. pos += 2;
  7375. } else {
  7376. // exact char match, e.g. [a] or "a"
  7377. found = found || pos->value == chr;
  7378. pos += 1;
  7379. }
  7380. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  7381. return std::make_pair(found == is_positive_char, pos);
  7382. }
  7383. // returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
  7384. // range at pos (regular or inverse range)
  7385. // asserts that pos is pointing to a char range element
  7386. static bool llama_grammar_match_partial_char(
  7387. const llama_grammar_element * pos,
  7388. const llama_partial_utf8 partial_utf8) {
  7389. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  7390. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  7391. uint32_t partial_value = partial_utf8.value;
  7392. int n_remain = partial_utf8.n_remain;
  7393. // invalid sequence or 7-bit char split across 2 bytes (overlong)
  7394. if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
  7395. return false;
  7396. }
  7397. // range of possible code points this partial UTF-8 sequence could complete to
  7398. uint32_t low = partial_value << (n_remain * 6);
  7399. uint32_t high = low | ((1 << (n_remain * 6)) - 1);
  7400. if (low == 0) {
  7401. if (n_remain == 2) {
  7402. low = 1 << 11;
  7403. } else if (n_remain == 3) {
  7404. low = 1 << 16;
  7405. }
  7406. }
  7407. do {
  7408. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  7409. // inclusive range, e.g. [a-z]
  7410. if (pos->value <= high && low <= pos[1].value) {
  7411. return is_positive_char;
  7412. }
  7413. pos += 2;
  7414. } else {
  7415. // exact char match, e.g. [a] or "a"
  7416. if (low <= pos->value && pos->value <= high) {
  7417. return is_positive_char;
  7418. }
  7419. pos += 1;
  7420. }
  7421. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  7422. return !is_positive_char;
  7423. }
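// Worked example: a pending 2-byte sequence with partial value 0x02 and n_remain == 1 can only
// complete to code points in [0x02 << 6, (0x02 << 6) | 0x3F] = [0x80, 0xBF], so a positive range
// like [a-z] is already rejected here, while a range such as [\u00A0-\u00BF] can still be satisfied
// once the final byte arrives.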
  7424. // transforms a grammar pushdown stack into N possible stacks, all ending
  7425. // at a character range (terminal element)
  7426. static void llama_grammar_advance_stack(
  7427. const std::vector<std::vector<llama_grammar_element>> & rules,
  7428. const std::vector<const llama_grammar_element *> & stack,
  7429. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  7430. if (stack.empty()) {
  7431. new_stacks.emplace_back(stack);
  7432. return;
  7433. }
  7434. const llama_grammar_element * pos = stack.back();
  7435. switch (pos->type) {
  7436. case LLAMA_GRETYPE_RULE_REF: {
  7437. const size_t rule_id = static_cast<size_t>(pos->value);
  7438. const llama_grammar_element * subpos = rules[rule_id].data();
  7439. do {
  7440. // init new stack without the top (pos)
  7441. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  7442. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  7443. // if this rule ref is followed by another element, add that to stack
  7444. new_stack.push_back(pos + 1);
  7445. }
  7446. if (!llama_grammar_is_end_of_sequence(subpos)) {
  7447. // if alternate is nonempty, add to stack
  7448. new_stack.push_back(subpos);
  7449. }
  7450. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  7451. while (!llama_grammar_is_end_of_sequence(subpos)) {
  7452. // scan to end of alternate def
  7453. subpos++;
  7454. }
  7455. if (subpos->type == LLAMA_GRETYPE_ALT) {
  7456. // there's another alternate def of this rule to process
  7457. subpos++;
  7458. } else {
  7459. break;
  7460. }
  7461. } while (true);
  7462. break;
  7463. }
  7464. case LLAMA_GRETYPE_CHAR:
  7465. case LLAMA_GRETYPE_CHAR_NOT:
  7466. new_stacks.emplace_back(stack);
  7467. break;
  7468. default:
  7469. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  7470. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  7471. // those
  7472. GGML_ASSERT(false);
  7473. }
  7474. }
  7475. // takes a set of possible pushdown stacks on a grammar, which are required to
  7476. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  7477. // produces the N possible stacks if the given char is accepted at those
  7478. // positions
  7479. static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
  7480. const std::vector<std::vector<llama_grammar_element>> & rules,
  7481. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  7482. const uint32_t chr) {
  7483. std::vector<std::vector<const llama_grammar_element *>> new_stacks;
  7484. for (const auto & stack : stacks) {
  7485. if (stack.empty()) {
  7486. continue;
  7487. }
  7488. auto match = llama_grammar_match_char(stack.back(), chr);
  7489. if (match.first) {
  7490. const llama_grammar_element * pos = match.second;
  7491. // update top of stack to next element, if any
  7492. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  7493. if (!llama_grammar_is_end_of_sequence(pos)) {
  7494. new_stack.push_back(pos);
  7495. }
  7496. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  7497. }
  7498. }
  7499. return new_stacks;
  7500. }
  7501. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  7502. const std::vector<std::vector<llama_grammar_element>> & rules,
  7503. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  7504. const std::vector<llama_grammar_candidate> & candidates);
  7505. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  7506. const std::vector<std::vector<llama_grammar_element>> & rules,
  7507. const std::vector<const llama_grammar_element *> & stack,
  7508. const std::vector<llama_grammar_candidate> & candidates) {
  7509. std::vector<llama_grammar_candidate> rejects;
  7510. if (stack.empty()) {
  7511. for (const auto & tok : candidates) {
  7512. if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
  7513. rejects.push_back(tok);
  7514. }
  7515. }
  7516. return rejects;
  7517. }
  7518. const llama_grammar_element * stack_pos = stack.back();
  7519. std::vector<llama_grammar_candidate> next_candidates;
  7520. for (const auto & tok : candidates) {
  7521. if (*tok.code_points == 0) {
  7522. // reached end of full codepoints in token, reject iff it ended in a partial sequence
  7523. // that cannot satisfy this position in grammar
  7524. if (tok.partial_utf8.n_remain != 0 &&
  7525. !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
  7526. rejects.push_back(tok);
  7527. }
  7528. } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
  7529. next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
  7530. } else {
  7531. rejects.push_back(tok);
  7532. }
  7533. }
  7534. const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  7535. // update top of stack to next element, if any
  7536. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  7537. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  7538. stack_after.push_back(stack_pos_after);
  7539. }
  7540. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  7541. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  7542. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  7543. for (const auto & tok : next_rejects) {
  7544. rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
  7545. }
  7546. return rejects;
  7547. }
  7548. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  7549. const std::vector<std::vector<llama_grammar_element>> & rules,
  7550. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  7551. const std::vector<llama_grammar_candidate> & candidates) {
  7552. GGML_ASSERT(!stacks.empty()); // REVIEW
  7553. if (candidates.empty()) {
  7554. return std::vector<llama_grammar_candidate>();
  7555. }
  7556. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  7557. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  7558. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  7559. }
  7560. return rejects;
  7561. }
  7562. //
  7563. // grammar - external
  7564. //
  7565. struct llama_grammar * llama_grammar_init(
  7566. const llama_grammar_element ** rules,
  7567. size_t n_rules,
  7568. size_t start_rule_index) {
  7569. const llama_grammar_element * pos;
  7570. // copy rule definitions into vectors
  7571. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  7572. for (size_t i = 0; i < n_rules; i++) {
  7573. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  7574. vec_rules[i].push_back(*pos);
  7575. }
  7576. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  7577. }
  7578. // loop over alternates of start rule to build initial stacks
  7579. std::vector<std::vector<const llama_grammar_element *>> stacks;
  7580. pos = rules[start_rule_index];
  7581. do {
  7582. std::vector<const llama_grammar_element *> stack;
  7583. if (!llama_grammar_is_end_of_sequence(pos)) {
  7584. // if alternate is nonempty, add to stack
  7585. stack.push_back(pos);
  7586. }
  7587. llama_grammar_advance_stack(vec_rules, stack, stacks);
  7588. while (!llama_grammar_is_end_of_sequence(pos)) {
  7589. // scan to end of alternate def
  7590. pos++;
  7591. }
  7592. if (pos->type == LLAMA_GRETYPE_ALT) {
  7593. // there's another alternate def of this rule to process
  7594. pos++;
  7595. } else {
  7596. break;
  7597. }
  7598. } while (true);
  7599. return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
  7600. }
  7601. void llama_grammar_free(struct llama_grammar * grammar) {
  7602. delete grammar;
  7603. }
  7604. struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
  7605. llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };
  7606. // redirect elements in stacks to point to new rules
  7607. for (size_t is = 0; is < result->stacks.size(); is++) {
  7608. for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
  7609. for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
  7610. for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
  7611. if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
  7612. result->stacks[is][ie] = &result->rules[ir0][ir1];
  7613. }
  7614. }
  7615. }
  7616. }
  7617. }
  7618. return result;
  7619. }
  7620. //
  7621. // sampling
  7622. //
  7623. void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
  7624. if (seed == LLAMA_DEFAULT_SEED) {
  7625. seed = time(NULL);
  7626. }
  7627. ctx->rng.seed(seed);
  7628. }
  7629. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  7630. GGML_ASSERT(candidates->size > 0);
  7631. const int64_t t_start_sample_us = ggml_time_us();
  7632. // Sort the logits in descending order
  7633. if (!candidates->sorted) {
  7634. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  7635. return a.logit > b.logit;
  7636. });
  7637. candidates->sorted = true;
  7638. }
  7639. float max_l = candidates->data[0].logit;
  7640. float cum_sum = 0.0f;
  7641. for (size_t i = 0; i < candidates->size; ++i) {
  7642. float p = expf(candidates->data[i].logit - max_l);
  7643. candidates->data[i].p = p;
  7644. cum_sum += p;
  7645. }
  7646. for (size_t i = 0; i < candidates->size; ++i) {
  7647. candidates->data[i].p /= cum_sum;
  7648. }
  7649. if (ctx) {
  7650. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7651. }
  7652. }
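// The max-logit subtraction above is the standard numerically stable softmax:
//   p_i = exp(l_i - max_l) / sum_j exp(l_j - max_l)
// which equals exp(l_i) / sum_j exp(l_j) exactly, but keeps every exponent <= 0 so expf never
// overflows.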
  7653. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int32_t k, size_t min_keep) {
  7654. // TODO: move bucket sort to separate function so that top_p/tail_free/typical/softmax first is equally fast
  7655. // if (k >= (int32_t)candidates->size) {
  7656. // return;
  7657. // }
  7658. const int64_t t_start_sample_us = ggml_time_us();
  7659. if (k <= 0) {
  7660. k = candidates->size;
  7661. }
  7662. k = std::max(k, (int) min_keep);
  7663. k = std::min(k, (int) candidates->size);
  7664. // Sort scores in descending order
  7665. if (!candidates->sorted) {
  7666. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  7667. return a.logit > b.logit;
  7668. };
  7669. if (k <= 128) {
  7670. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  7671. } else {
  7672. constexpr int nbuckets = 128;
  7673. constexpr float bucket_low = -10.0f;
  7674. constexpr float bucket_high = 10.0f;
  7675. constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low);
7676. constexpr float bucket_inter = -bucket_low * bucket_scale;
  7677. std::vector<int> bucket_idx(candidates->size);
  7678. std::vector<int> histo(nbuckets, 0);
  7679. for (int i = 0; i < (int)candidates->size; ++i) {
  7680. const float val = candidates->data[i].logit;
7681. int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
  7682. ib = std::max(0, std::min(nbuckets-1, ib));
  7683. bucket_idx[i] = ib;
  7684. ++histo[ib];
  7685. }
  7686. int nhave = 0;
  7687. int ib = nbuckets - 1;
  7688. for ( ; ib >= 0; --ib) {
  7689. nhave += histo[ib];
  7690. if (nhave >= k) break;
  7691. }
  7692. std::vector<llama_token_data> tmp_tokens(nhave);
  7693. auto ptr = tmp_tokens.data();
  7694. std::vector<llama_token_data*> bucket_ptrs;
  7695. bucket_ptrs.reserve(nbuckets - ib);
  7696. for (int j = nbuckets - 1; j >= ib; --j) {
  7697. bucket_ptrs.push_back(ptr);
  7698. ptr += histo[j];
  7699. }
  7700. for (int i = 0; i < (int)candidates->size; ++i) {
  7701. int j = bucket_idx[i];
  7702. if (j >= ib) {
  7703. *bucket_ptrs[nbuckets-1-j]++ = candidates->data[i];
  7704. }
  7705. }
  7706. ptr = tmp_tokens.data();
  7707. int ndone = 0;
  7708. for (int j = nbuckets-1; j > ib; --j) {
  7709. std::sort(ptr, ptr + histo[j], comp);
  7710. ptr += histo[j];
  7711. ndone += histo[j];
  7712. }
  7713. std::partial_sort(ptr, ptr + k - ndone, ptr + histo[ib], comp);
  7714. std::memcpy(candidates->data, tmp_tokens.data(), k*sizeof(llama_token_data));
  7715. }
  7716. candidates->sorted = true;
  7717. }
  7718. candidates->size = k;
  7719. if (ctx) {
  7720. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7721. }
  7722. }
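// Sketch of the bucket pass above (used when the list is unsorted and k > 128): logits are binned
// into 128 buckets covering [-10, 10] via ib = bucket_scale * val + bucket_inter, clamped to
// [0, 127]; e.g. val = 0.0f lands in bucket 64 and val = 9.99f in bucket 127. Buckets are then
// consumed from the highest one down until at least k tokens are collected, so only that small
// prefix needs an actual sort.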
  7723. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  7724. if (p >= 1.0f) {
  7725. return;
  7726. }
  7727. llama_sample_softmax(ctx, candidates);
  7728. const int64_t t_start_sample_us = ggml_time_us();
  7729. // Compute the cumulative probabilities
  7730. float cum_sum = 0.0f;
  7731. size_t last_idx = candidates->size;
  7732. for (size_t i = 0; i < candidates->size; ++i) {
  7733. cum_sum += candidates->data[i].p;
7734. // Check if the running sum is at least p or if we have kept at least min_keep tokens;
7735. // set the last index to i+1 so that the current token is included in the kept set
  7736. if (cum_sum >= p && i + 1 >= min_keep) {
  7737. last_idx = i + 1;
  7738. break;
  7739. }
  7740. }
  7741. // Resize the output vector to keep only the top-p tokens
  7742. candidates->size = last_idx;
  7743. if (ctx) {
  7744. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7745. }
  7746. }
  7747. void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  7748. if (p <= 0.0f || !candidates->size) {
  7749. return;
  7750. }
  7751. const int64_t t_start_sample_us = ggml_time_us();
  7752. bool min_p_applied = false;
  7753. // if the candidates aren't sorted, try the unsorted implementation first
  7754. if (!candidates->sorted) {
  7755. std::vector<llama_token_data> filtered_tokens;
  7756. float max_logit = -FLT_MAX;
  7757. for (size_t i = 0; i < candidates->size; ++i) {
  7758. max_logit = std::max(max_logit, candidates->data[i].logit);
  7759. }
  7760. const float min_logit = max_logit + logf(p); // min logit for p_i >= p * p_max
  7761. for (size_t i = 0; i < candidates->size; ++i) {
  7762. if (candidates->data[i].logit >= min_logit) {
  7763. filtered_tokens.push_back(candidates->data[i]);
  7764. }
  7765. }
  7766. // if we have enough values the operation was a success
  7767. if (filtered_tokens.size() >= min_keep) {
  7768. memcpy(candidates->data, filtered_tokens.data(), filtered_tokens.size()*sizeof(llama_token_data));
  7769. candidates->size = filtered_tokens.size();
  7770. min_p_applied = true;
  7771. }
  7772. }
  7773. // if the candidates are sorted or the unsorted implementation failed, use this implementation
  7774. if (!min_p_applied) {
  7775. // Sort the logits in descending order
  7776. if (!candidates->sorted) {
  7777. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  7778. return a.logit > b.logit;
  7779. });
  7780. candidates->sorted = true;
  7781. }
  7782. const float min_logit = candidates->data[0].logit + logf(p); // min logit for p_i >= p * p_max
  7783. size_t i = 1; // first token always matches
  7784. for (; i < candidates->size; ++i) {
  7785. if (candidates->data[i].logit < min_logit && i >= min_keep) {
  7786. break; // prob too small
  7787. }
  7788. }
  7789. // Resize the output vector to keep only the matching tokens
  7790. candidates->size = i;
  7791. }
  7792. if (ctx) {
  7793. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7794. }
  7795. }
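// Derivation of the threshold used in both branches above: after softmax, p_i >= p * p_max is
// equivalent to exp(l_i - max_l) >= p, i.e. l_i >= max_l + log(p), which is why the code compares
// raw logits against max_logit + logf(p) without ever normalizing.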
  7796. void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
  7797. if (z >= 1.0f || candidates->size <= 2) {
  7798. return;
  7799. }
  7800. llama_sample_softmax(nullptr, candidates);
  7801. const int64_t t_start_sample_us = ggml_time_us();
  7802. // Compute the first and second derivatives
  7803. std::vector<float> first_derivatives(candidates->size - 1);
  7804. std::vector<float> second_derivatives(candidates->size - 2);
  7805. for (size_t i = 0; i < first_derivatives.size(); ++i) {
  7806. first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
  7807. }
  7808. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  7809. second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
  7810. }
  7811. // Calculate absolute value of second derivatives
  7812. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  7813. second_derivatives[i] = std::abs(second_derivatives[i]);
  7814. }
  7815. // Normalize the second derivatives
  7816. {
  7817. const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
  7818. if (second_derivatives_sum > 1e-6f) {
  7819. for (float & value : second_derivatives) {
  7820. value /= second_derivatives_sum;
  7821. }
  7822. } else {
  7823. for (float & value : second_derivatives) {
  7824. value = 1.0f / second_derivatives.size();
  7825. }
  7826. }
  7827. }
  7828. float cum_sum = 0.0f;
  7829. size_t last_idx = candidates->size;
  7830. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  7831. cum_sum += second_derivatives[i];
  7832. // Check if the running sum is greater than z or if we have kept at least min_keep tokens
  7833. if (cum_sum > z && i >= min_keep) {
  7834. last_idx = i;
  7835. break;
  7836. }
  7837. }
  7838. // Resize the output vector to keep only the tokens above the tail location
  7839. candidates->size = last_idx;
  7840. if (ctx) {
  7841. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7842. }
  7843. }
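// Sketch of the idea: with probabilities sorted in descending order, the absolute second
// differences measure the curvature of the probability curve; normalized to sum to 1 they act as
// weights, and once their running sum exceeds z the remaining candidates are treated as the flat
// "tail" and dropped.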
  7844. void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  7845. // Reference implementation:
  7846. // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
  7847. if (p >= 1.0f) {
  7848. return;
  7849. }
  7850. // Compute the softmax of logits and calculate entropy
  7851. llama_sample_softmax(nullptr, candidates);
  7852. const int64_t t_start_sample_us = ggml_time_us();
  7853. float entropy = 0.0f;
  7854. for (size_t i = 0; i < candidates->size; ++i) {
  7855. entropy += -candidates->data[i].p * logf(candidates->data[i].p);
  7856. }
  7857. // Compute the absolute difference between negative log probability and entropy for each candidate
  7858. std::vector<float> shifted_scores;
  7859. for (size_t i = 0; i < candidates->size; ++i) {
  7860. float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
  7861. shifted_scores.push_back(shifted_score);
  7862. }
  7863. // Sort tokens based on the shifted_scores and their corresponding indices
  7864. std::vector<size_t> indices(candidates->size);
  7865. std::iota(indices.begin(), indices.end(), 0);
  7866. std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
  7867. return shifted_scores[a] < shifted_scores[b];
  7868. });
  7869. // Compute the cumulative probabilities
  7870. float cum_sum = 0.0f;
  7871. size_t last_idx = indices.size();
  7872. for (size_t i = 0; i < indices.size(); ++i) {
  7873. size_t idx = indices[i];
  7874. cum_sum += candidates->data[idx].p;
7875. // Stop once the running sum exceeds p and at least min_keep tokens have been kept
  7876. if (cum_sum > p && i >= min_keep - 1) {
  7877. last_idx = i + 1;
  7878. break;
  7879. }
  7880. }
  7881. // Resize the output vector to keep only the locally typical tokens
  7882. std::vector<llama_token_data> new_candidates;
  7883. for (size_t i = 0; i < last_idx; ++i) {
  7884. size_t idx = indices[i];
  7885. new_candidates.push_back(candidates->data[idx]);
  7886. }
  7887. // Replace the data in candidates with the new_candidates data
  7888. std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
  7889. candidates->size = new_candidates.size();
  7890. candidates->sorted = false;
  7891. if (ctx) {
  7892. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7893. }
  7894. }
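// Locally typical sampling keeps the tokens whose surprisal -log p(x) is closest to the
// distribution's entropy H, i.e. it sorts by |-log p(x) - H| and keeps the smallest prefix whose
// probability mass exceeds p. A sketch of a call site, assuming cur_p was built as in the
// sketch above:
//
//     llama_sample_typical(ctx, &cur_p, 0.90f, 1);  // typical_p = 0.9
//     llama_token tok = llama_sample_token(ctx, &cur_p);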
  7895. void llama_sample_entropy(struct llama_context * ctx, llama_token_data_array * candidates_p, float min_temp, float max_temp, float exponent_val) {
  7896. const int64_t t_start_sample_us = ggml_time_us();
7897. // no need to do anything if there is only one (or zero) candidate
7898. if (candidates_p->size <= 1) {
  7899. return;
  7900. }
  7901. // Calculate maximum possible entropy
  7902. float max_entropy = -logf(1.0f / candidates_p->size);
  7903. llama_sample_softmax(nullptr, candidates_p);
  7904. // Calculate entropy of the softmax probabilities
  7905. float entropy = 0.0f;
  7906. for (size_t i = 0; i < candidates_p->size; ++i) {
  7907. float prob = candidates_p->data[i].p;
  7908. if (prob > 0.0f) { // Ensure no log(0)
  7909. entropy -= prob * logf(prob);
  7910. }
  7911. }
  7912. // Normalize the entropy (max_entropy cannot be 0 here because we checked candidates_p->size != 1 above)
  7913. float normalized_entropy = entropy / max_entropy;
  7914. // Map the normalized entropy to the desired temperature range using the power function
  7915. float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val);
  7916. #ifdef DEBUG
  7917. LLAMA_LOG_INFO("Your text maxtemp value is: %f\n", max_temp);
  7918. LLAMA_LOG_INFO("Entropy: %f\n", entropy);
  7919. LLAMA_LOG_INFO("Max Possible Entropy: %f\n", max_entropy);
  7920. LLAMA_LOG_INFO("Normalized Entropy: %f\n", normalized_entropy);
  7921. LLAMA_LOG_INFO("Exponent: %f\n", exponent_val);
  7922. LLAMA_LOG_INFO("Dynamic Temperature (dyn_temp): %f\n", dyn_temp);
  7923. #endif
  7924. // Apply the dynamically calculated temperature scaling
  7925. for (size_t i = 0; i < candidates_p->size; ++i) {
  7926. candidates_p->data[i].logit /= dyn_temp;
  7927. }
  7928. // Re-compute softmax probabilities after scaling logits with dynamic temperature
  7929. double max_l_double = candidates_p->data[0].logit;
  7930. double cum_sum_double = 0.0;
  7931. for (size_t i = 0; i < candidates_p->size; ++i) {
  7932. double p = exp(candidates_p->data[i].logit - max_l_double);
  7933. candidates_p->data[i].p = p; // Store the scaled probability
  7934. cum_sum_double += p;
  7935. }
  7936. for (size_t i = 0; i < candidates_p->size; ++i) {
  7937. candidates_p->data[i].p /= cum_sum_double; // Re-normalize the probabilities
  7938. }
  7939. #ifdef DEBUG
  7940. // Print the updated top 25 probabilities after temperature scaling
  7941. LLAMA_LOG_INFO("\nUpdated Top 25 Probabilities After Dynamic Temperature Scaling (in percentages):\n");
  7942. for (size_t i = 0; i < 25 && i < candidates_p->size; ++i) {
  7943. LLAMA_LOG_INFO("Token %zu: %f%%\n", i + 1, candidates_p->data[i].p * 100.0f);
  7944. }
  7945. #endif
  7946. if (ctx) {
  7947. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7948. }
  7949. }
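// Worked example of the mapping above: with 4 candidates, max_entropy = ln(4) ~= 1.386; if the
// measured entropy is ln(2) ~= 0.693 (two equally likely tokens), normalized_entropy = 0.5, and
// with min_temp = 0.5, max_temp = 1.5, exponent_val = 1.0 the result is
// dyn_temp = 0.5 + (1.5 - 0.5) * 0.5 = 1.0, i.e. flatter distributions are sampled at a higher
// temperature while peaked ones stay close to min_temp.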
  7950. void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  7951. const int64_t t_start_sample_us = ggml_time_us();
  7952. for (size_t i = 0; i < candidates_p->size; ++i) {
  7953. candidates_p->data[i].logit /= temp;
  7954. }
  7955. if (ctx) {
  7956. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7957. }
  7958. }
  7959. void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  7960. llama_sample_temp(ctx, candidates_p, temp);
  7961. }
  7962. void llama_sample_repetition_penalties(
  7963. struct llama_context * ctx,
  7964. llama_token_data_array * candidates,
  7965. const llama_token * last_tokens,
  7966. size_t penalty_last_n,
  7967. float penalty_repeat,
  7968. float penalty_freq,
  7969. float penalty_present) {
  7970. if (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)) {
  7971. return;
  7972. }
  7973. const int64_t t_start_sample_us = ggml_time_us();
  7974. // Create a frequency map to count occurrences of each token in last_tokens
  7975. std::unordered_map<llama_token, int> token_count;
  7976. for (size_t i = 0; i < penalty_last_n; ++i) {
  7977. token_count[last_tokens[i]]++;
  7978. }
  7979. // Apply frequency and presence penalties to the candidates
  7980. for (size_t i = 0; i < candidates->size; ++i) {
  7981. const auto token_iter = token_count.find(candidates->data[i].id);
  7982. if (token_iter == token_count.end()) {
  7983. continue;
  7984. }
  7985. const int count = token_iter->second;
7986. // The academic publication that described this technique only divides by the penalty, but that would make tokens with negative logits more likely, which is obviously wrong.
7987. // The common fix is to multiply negative logits by the penalty instead of dividing them.
  7988. if (candidates->data[i].logit <= 0) {
  7989. candidates->data[i].logit *= penalty_repeat;
  7990. } else {
  7991. candidates->data[i].logit /= penalty_repeat;
  7992. }
  7993. candidates->data[i].logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;
  7994. }
  7995. candidates->sorted = false;
  7996. if (ctx) {
  7997. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7998. }
  7999. }
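// Worked example of the combined penalties above, for a token seen count = 2 times in the window
// with penalty_repeat = 1.1, penalty_freq = 0.2, penalty_present = 0.5:
//   logit  2.0  ->  2.0 / 1.1 - (2 * 0.2 + 0.5) ~=  0.92
//   logit -2.0  -> -2.0 * 1.1 - (2 * 0.2 + 0.5)  = -3.10
// so repeated tokens are penalized regardless of the sign of their logit.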
  8000. void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
  8001. GGML_ASSERT(ctx);
  8002. const int64_t t_start_sample_us = ggml_time_us();
  8003. bool allow_eos = false;
  8004. for (const auto & stack : grammar->stacks) {
  8005. if (stack.empty()) {
  8006. allow_eos = true;
  8007. break;
  8008. }
  8009. }
  8010. const llama_token eos = llama_token_eos(&ctx->model);
  8011. std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
  8012. candidates_decoded.reserve(candidates->size);
  8013. std::vector<llama_grammar_candidate> candidates_grammar;
  8014. candidates_grammar.reserve(candidates->size);
  8015. for (size_t i = 0; i < candidates->size; ++i) {
  8016. const llama_token id = candidates->data[i].id;
  8017. const std::string piece = llama_token_to_piece(ctx, id);
  8018. if (id == eos) {
  8019. if (!allow_eos) {
  8020. candidates->data[i].logit = -INFINITY;
  8021. }
  8022. } else if (piece.empty() || piece[0] == 0) {
  8023. candidates->data[i].logit = -INFINITY;
  8024. } else {
  8025. candidates_decoded.push_back(decode_utf8(piece, grammar->partial_utf8));
  8026. candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
  8027. }
  8028. }
  8029. const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
  8030. for (const auto & reject : rejects) {
  8031. candidates->data[reject.index].logit = -INFINITY;
  8032. }
  8033. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8034. }
  8035. static void llama_log_softmax(float * array, size_t size) {
  8036. float max_l = *std::max_element(array, array + size);
  8037. float sum = 0.f;
  8038. for (size_t i = 0; i < size; ++i) {
  8039. float p = expf(array[i] - max_l);
  8040. sum += p;
  8041. array[i] = p;
  8042. }
  8043. for (size_t i = 0; i < size; ++i) {
  8044. array[i] = logf(array[i] / sum);
  8045. }
  8046. }
  8047. void llama_sample_apply_guidance(
  8048. struct llama_context * ctx,
  8049. float * logits,
  8050. float * logits_guidance,
  8051. float scale) {
  8052. GGML_ASSERT(ctx);
  8053. const auto t_start_sample_us = ggml_time_us();
  8054. const auto n_vocab = llama_n_vocab(llama_get_model(ctx));
  8055. llama_log_softmax(logits, n_vocab);
  8056. llama_log_softmax(logits_guidance, n_vocab);
  8057. for (int i = 0; i < n_vocab; ++i) {
  8058. auto & l = logits[i];
  8059. const auto & g = logits_guidance[i];
  8060. l = scale * (l - g) + g;
  8061. }
  8062. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8063. }
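// In log-softmax space the update above is l = g + scale * (l - g): scale = 1 leaves the logits
// unchanged and scale > 1 pushes them further away from the guidance (negative-prompt) logits.
// A sketch of a call site, assuming both contexts were evaluated on their respective prompts:
//
//     float * logits          = llama_get_logits(ctx);
//     float * logits_guidance = llama_get_logits(guidance_ctx);
//     llama_sample_apply_guidance(ctx, logits, logits_guidance, 1.5f);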
  8064. void llama_sample_classifier_free_guidance(
  8065. struct llama_context * ctx,
  8066. llama_token_data_array * candidates,
  8067. struct llama_context * guidance_ctx,
  8068. float scale) {
  8069. GGML_ASSERT(ctx);
  8070. int64_t t_start_sample_us;
  8071. t_start_sample_us = ggml_time_us();
  8072. const size_t n_vocab = llama_n_vocab(llama_get_model(ctx));
  8073. GGML_ASSERT(n_vocab == candidates->size);
  8074. GGML_ASSERT(!candidates->sorted);
  8075. std::vector<float> logits_base(n_vocab);
  8076. for (size_t i = 0; i < n_vocab; ++i) {
  8077. logits_base[i] = candidates->data[i].logit;
  8078. }
  8079. float * logits_guidance = llama_get_logits(guidance_ctx);
  8080. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8081. llama_sample_apply_guidance(ctx, logits_base.data(), logits_guidance, scale);
  8082. t_start_sample_us = ggml_time_us();
  8083. for (size_t i = 0; i < n_vocab; ++i) {
  8084. candidates->data[i].logit = logits_base[i];
  8085. }
  8086. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8087. }
  8088. llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int32_t m, float * mu) {
  8089. GGML_ASSERT(ctx);
  8090. auto N = float(llama_n_vocab(llama_get_model(ctx)));
  8091. int64_t t_start_sample_us;
  8092. t_start_sample_us = ggml_time_us();
  8093. llama_sample_softmax(nullptr, candidates);
  8094. // Estimate s_hat using the most probable m tokens
  8095. float s_hat = 0.0;
  8096. float sum_ti_bi = 0.0;
  8097. float sum_ti_sq = 0.0;
  8098. for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
  8099. float t_i = logf(float(i + 2) / float(i + 1));
  8100. float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
  8101. sum_ti_bi += t_i * b_i;
  8102. sum_ti_sq += t_i * t_i;
  8103. }
  8104. s_hat = sum_ti_bi / sum_ti_sq;
  8105. // Compute k from the estimated s_hat and target surprise value
  8106. float epsilon_hat = s_hat - 1;
  8107. float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
  8108. // Sample the next word X using top-k sampling
  8109. llama_sample_top_k(nullptr, candidates, int(k), 1);
  8110. if (ctx) {
  8111. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8112. }
  8113. llama_token X = llama_sample_token(ctx, candidates);
  8114. t_start_sample_us = ggml_time_us();
  8115. // Compute error as the difference between observed surprise and target surprise value
  8116. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  8117. return candidate.id == X;
  8118. }));
  8119. float observed_surprise = -log2f(candidates->data[X_idx].p);
  8120. float e = observed_surprise - tau;
  8121. // Update mu using the learning rate and error
  8122. *mu = *mu - eta * e;
  8123. if (ctx) {
  8124. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8125. }
  8126. return X;
  8127. }
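// A minimal Mirostat (v1) loop sketch: tau is the target surprise, eta the learning rate, and the
// dynamic bound mu is commonly initialized to 2 * tau and carried across calls. Assumes cur_p is
// rebuilt from the latest logits on every step:
//
//     float tau = 5.0f, eta = 0.1f;
//     float mu  = 2.0f * tau;
//     llama_token tok = llama_sample_token_mirostat(ctx, &cur_p, tau, eta, /*m =*/ 100, &mu);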
  8128. llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
  8129. int64_t t_start_sample_us;
  8130. t_start_sample_us = ggml_time_us();
  8131. llama_sample_softmax(ctx, candidates);
  8132. // Truncate the words with surprise values greater than mu
  8133. candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  8134. return -log2f(candidate.p) > *mu;
  8135. }));
  8136. if (candidates->size == 0) {
  8137. candidates->size = 1;
  8138. }
  8139. if (ctx) {
  8140. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8141. }
  8142. // Normalize the probabilities of the remaining words
  8143. llama_sample_softmax(ctx, candidates);
  8144. // Sample the next word X from the remaining words
  8145. llama_token X = llama_sample_token(ctx, candidates);
  8146. t_start_sample_us = ggml_time_us();
  8147. // Compute error as the difference between observed surprise and target surprise value
  8148. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  8149. return candidate.id == X;
  8150. }));
  8151. float observed_surprise = -log2f(candidates->data[X_idx].p);
  8152. float e = observed_surprise - tau;
  8153. // Update mu using the learning rate and error
  8154. *mu = *mu - eta * e;
  8155. if (ctx) {
  8156. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8157. }
  8158. return X;
  8159. }
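// Mirostat v2 drops the Zipf-based top-k estimate and truncates directly at surprise > mu.
// Usage mirrors v1: keep a persistent mu (commonly initialized to 2 * tau) across calls, e.g.
//
//     llama_token tok = llama_sample_token_mirostat_v2(ctx, &cur_p, 5.0f, 0.1f, &mu);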
  8160. llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
  8161. const int64_t t_start_sample_us = ggml_time_us();
  8162. // Find max element
  8163. auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  8164. return a.logit < b.logit;
  8165. });
  8166. llama_token result = max_iter->id;
  8167. if (ctx) {
  8168. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8169. ctx->n_sample++;
  8170. }
  8171. return result;
  8172. }
  8173. llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
  8174. GGML_ASSERT(ctx);
  8175. const int64_t t_start_sample_us = ggml_time_us();
  8176. llama_sample_softmax(nullptr, candidates);
  8177. std::vector<float> probs;
  8178. probs.reserve(candidates->size);
  8179. for (size_t i = 0; i < candidates->size; ++i) {
  8180. probs.push_back(candidates->data[i].p);
  8181. }
  8182. std::discrete_distribution<> dist(probs.begin(), probs.end());
  8183. auto & rng = ctx->rng;
  8184. int idx = dist(rng);
  8185. llama_token result = candidates->data[idx].id;
  8186. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8187. ctx->n_sample++;
  8188. return result;
  8189. }
  8190. void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
  8191. const int64_t t_start_sample_us = ggml_time_us();
  8192. if (token == llama_token_eos(&ctx->model)) {
  8193. for (const auto & stack : grammar->stacks) {
  8194. if (stack.empty()) {
  8195. return;
  8196. }
  8197. }
  8198. GGML_ASSERT(false);
  8199. }
  8200. const std::string piece = llama_token_to_piece(ctx, token);
  8201. // Note terminating 0 in decoded string
  8202. const auto decoded = decode_utf8(piece, grammar->partial_utf8);
  8203. const auto & code_points = decoded.first;
  8204. for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
  8205. grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
  8206. }
  8207. grammar->partial_utf8 = decoded.second;
  8208. GGML_ASSERT(!grammar->stacks.empty());
  8209. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8210. }
  8211. //
  8212. // Beam search
  8213. //
  8214. struct llama_beam {
  8215. std::vector<llama_token> tokens;
  8216. float p; // Cumulative beam probability (renormalized relative to all beams)
  8217. bool eob; // Initialize end-of-beam to false. Callback sets this to true.
  8218. // Sort beams by probability. In case of ties, prefer beams at eob.
  8219. bool operator<(const llama_beam & rhs) const {
  8220. return std::make_pair(p, eob) < std::make_pair(rhs.p, rhs.eob);
  8221. }
  8222. // Shift off first n tokens and discard them.
  8223. void shift_tokens(const size_t n) {
  8224. if (n) {
  8225. std::copy(tokens.begin() + n, tokens.end(), tokens.begin());
  8226. tokens.resize(tokens.size() - n);
  8227. }
  8228. }
  8229. llama_beam_view view() const { return {tokens.data(), tokens.size(), p, eob}; }
  8230. };
  8231. // A struct for calculating logit-related info.
  8232. struct llama_logit_info {
  8233. const float * const logits;
  8234. const int n_vocab;
  8235. const float max_l;
  8236. const float normalizer;
  8237. struct sum_exp {
  8238. float max_l;
  8239. float operator()(float sum, float l) const { return sum + std::exp(l - max_l); }
  8240. };
  8241. llama_logit_info(llama_context * ctx)
  8242. : logits(llama_get_logits(ctx))
  8243. , n_vocab(llama_n_vocab(llama_get_model(ctx)))
  8244. , max_l(*std::max_element(logits, logits + n_vocab))
  8245. , normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
  8246. { }
  8247. llama_token_data get_token_data(const llama_token token_id) const {
  8248. constexpr auto p = std::numeric_limits<float>::quiet_NaN(); // never used
  8249. return {token_id, logits[token_id], p};
  8250. }
  8251. // Return top k token_data by logit.
  8252. std::vector<llama_token_data> top_k(size_t k) {
  8253. std::vector<llama_token_data> min_heap; // min-heap by logit
  8254. const llama_token k_min = std::min(static_cast<llama_token>(k), n_vocab);
  8255. min_heap.reserve(k_min);
  8256. for (llama_token token_id = 0 ; token_id < k_min ; ++token_id) {
  8257. min_heap.push_back(get_token_data(token_id));
  8258. }
  8259. auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; };
  8260. std::make_heap(min_heap.begin(), min_heap.end(), comp);
  8261. for (llama_token token_id = k_min ; token_id < n_vocab ; ++token_id) {
  8262. if (min_heap.front().logit < logits[token_id]) {
  8263. std::pop_heap(min_heap.begin(), min_heap.end(), comp);
  8264. min_heap.back().id = token_id;
  8265. min_heap.back().logit = logits[token_id];
  8266. std::push_heap(min_heap.begin(), min_heap.end(), comp);
  8267. }
  8268. }
  8269. return min_heap;
  8270. }
  8271. float probability_from_logit(float logit) const {
  8272. return normalizer * std::exp(logit - max_l);
  8273. }
  8274. };
  8275. struct llama_beam_search_data {
  8276. llama_context * ctx;
  8277. size_t n_beams;
  8278. int n_past;
  8279. int n_predict;
  8280. std::vector<llama_beam> beams;
  8281. std::vector<llama_beam> next_beams;
  8282. // Re-calculated on each loop iteration
  8283. size_t common_prefix_length;
  8284. // Used to communicate to/from callback on beams state.
  8285. std::vector<llama_beam_view> beam_views;
  8286. llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict)
  8287. : ctx(ctx)
  8288. , n_beams(n_beams)
  8289. , n_past(n_past)
  8290. , n_predict(n_predict)
  8291. , beam_views(n_beams) {
  8292. beams.reserve(n_beams);
  8293. next_beams.reserve(n_beams);
  8294. }
  8295. // Collapse beams to a single beam given by index.
  8296. void collapse_beams(const size_t beam_idx) {
  8297. if (0u < beam_idx) {
  8298. std::swap(beams[0], beams[beam_idx]);
  8299. }
  8300. beams.resize(1);
  8301. }
  8302. // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
  8303. // The repetitive patterns below reflect the 2 stages of heaps:
  8304. // * Gather elements until the vector is full, then call std::make_heap() on it.
  8305. // * If the heap is full and a new element is found that should be included, pop the
  8306. // least element to the back(), replace it with the new, then push it into the heap.
  8307. void fill_next_beams_by_top_probabilities(llama_beam & beam) {
  8308. // Min-heaps use a greater-than comparator.
  8309. const auto comp = [](const llama_beam & a, const llama_beam & b) { return a.p > b.p; };
  8310. if (beam.eob) {
  8311. // beam is at end-of-sentence, so just copy it to next_beams if its probability is high enough.
  8312. if (next_beams.size() < n_beams) {
  8313. next_beams.push_back(std::move(beam));
  8314. if (next_beams.size() == n_beams) {
  8315. std::make_heap(next_beams.begin(), next_beams.end(), comp);
  8316. }
  8317. } else if (next_beams.front().p < beam.p) {
  8318. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  8319. next_beams.back() = std::move(beam);
  8320. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  8321. }
  8322. } else {
  8323. // beam is not at end-of-sentence, so branch with next top_k tokens.
  8324. if (!beam.tokens.empty()) {
  8325. llama_decode(ctx, llama_batch_get_one(beam.tokens.data(), beam.tokens.size(), n_past, 0));
  8326. }
  8327. llama_logit_info logit_info(ctx);
  8328. std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);
  8329. size_t i=0;
  8330. if (next_beams.size() < n_beams) {
  8331. for (; next_beams.size() < n_beams ; ++i) {
  8332. llama_beam next_beam = beam;
  8333. next_beam.tokens.push_back(next_tokens[i].id);
  8334. next_beam.p *= logit_info.probability_from_logit(next_tokens[i].logit);
  8335. next_beams.push_back(std::move(next_beam));
  8336. }
  8337. std::make_heap(next_beams.begin(), next_beams.end(), comp);
  8338. } else {
  8339. for (; next_beams.front().p == 0.0f ; ++i) {
  8340. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  8341. next_beams.back() = beam;
  8342. next_beams.back().tokens.push_back(next_tokens[i].id);
  8343. next_beams.back().p *= logit_info.probability_from_logit(next_tokens[i].logit);
  8344. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  8345. }
  8346. }
  8347. for (; i < n_beams ; ++i) {
  8348. const float next_p = beam.p * logit_info.probability_from_logit(next_tokens[i].logit);
  8349. if (next_beams.front().p < next_p) {
  8350. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  8351. next_beams.back() = beam;
  8352. next_beams.back().tokens.push_back(next_tokens[i].id);
  8353. next_beams.back().p = next_p;
  8354. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  8355. }
  8356. }
  8357. }
  8358. }
  8359. // Find common_prefix_length based on beams.
  8360. // Requires beams is not empty.
  8361. size_t find_common_prefix_length() {
  8362. size_t common_prefix_length = beams[0].tokens.size();
  8363. for (size_t i = 1 ; i < beams.size() ; ++i) {
  8364. common_prefix_length = std::min(common_prefix_length, beams[i].tokens.size());
  8365. for (size_t j = 0 ; j < common_prefix_length ; ++j) {
  8366. if (beams[0].tokens[j] != beams[i].tokens[j]) {
  8367. common_prefix_length = j;
  8368. break;
  8369. }
  8370. }
  8371. }
  8372. return common_prefix_length;
  8373. }
  8374. // Construct beams_state to send back to caller via the callback function.
  8375. // Side effect: set common_prefix_length = find_common_prefix_length();
  8376. llama_beams_state get_beams_state(const bool last_call) {
  8377. for (size_t i = 0 ; i < beams.size() ; ++i) {
  8378. beam_views[i] = beams[i].view();
  8379. }
  8380. common_prefix_length = find_common_prefix_length();
  8381. return {beam_views.data(), beams.size(), common_prefix_length, last_call};
  8382. }
  8383. // Loop:
  8384. // * while i < n_predict, AND
  8385. // * any of the beams have not yet reached end-of-beam (eob), AND
  8386. // * the highest probability beam(s) (plural in case of ties) are not at end-of-sentence
  8387. // (since all other beam probabilities can only decrease)
  8388. void loop(const llama_beam_search_callback_fn_t callback, void * const callback_data) {
  8389. beams.push_back({{}, 1.0f, false}); // Start with one empty beam w/ probability = 1.0 and !eob.
  8390. const auto not_eob = [](const llama_beam & beam) { return !beam.eob; };
  8391. for (int i = 0 ; i < n_predict && std::any_of(beams.begin(),beams.end(),not_eob) &&
  8392. !beams[top_beam_index()].eob ; ++i) {
  8393. callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
  8394. update_beams_from_beam_views(); // Update values (p,eob) that callback may have changed.
  8395. if (common_prefix_length) {
  8396. llama_decode(ctx, llama_batch_get_one(beams[0].tokens.data(), common_prefix_length, n_past, 0));
  8397. n_past += common_prefix_length;
  8398. }
  8399. // Zero-out next_beam probabilities to place them last in following min-heap.
  8400. std::for_each(next_beams.begin(), next_beams.end(), [](llama_beam & beam) { beam.p = 0.0f; });
  8401. for (llama_beam & beam : beams) {
  8402. beam.shift_tokens(common_prefix_length);
  8403. fill_next_beams_by_top_probabilities(beam);
  8404. }
  8405. // next_beams become the beams of next/final iteration. Swap them to re-use memory.
  8406. beams.swap(next_beams);
  8407. renormalize_beam_probabilities(beams);
  8408. }
  8409. collapse_beams(top_beam_index());
  8410. callback(callback_data, get_beams_state(true));
  8411. }
  8412. // As beams grow, the cumulative probabilities decrease.
  8413. // Renormalize them to avoid floating point underflow.
  8414. static void renormalize_beam_probabilities(std::vector<llama_beam> & beams) {
  8415. const auto sum_p = [](float sum, llama_beam & beam) { return sum + beam.p; };
  8416. const float inv_sum = 1.0f / std::accumulate(beams.begin(), beams.end(), 0.0f, sum_p);
  8417. std::for_each(beams.begin(), beams.end(), [=](llama_beam & beam) { beam.p *= inv_sum; });
  8418. }
  8419. // Assumes beams is non-empty. Uses llama_beam::operator<() for ordering.
  8420. size_t top_beam_index() {
  8421. return std::max_element(beams.begin(), beams.end()) - beams.begin();
  8422. }
  8423. // Copy (p,eob) for each beam which may have been changed by the callback.
  8424. void update_beams_from_beam_views() {
  8425. for (size_t i = 0 ; i < beams.size() ; ++i) {
  8426. beams[i].p = beam_views[i].p;
  8427. beams[i].eob = beam_views[i].eob;
  8428. }
  8429. }
  8430. };
  8431. void llama_beam_search(llama_context * ctx,
  8432. llama_beam_search_callback_fn_t callback, void * callback_data,
  8433. size_t n_beams, int n_past, int n_predict) {
  8434. assert(ctx);
  8435. const int64_t t_start_sample_us = ggml_time_us();
  8436. llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict);
  8437. beam_search_data.loop(callback, callback_data);
  8438. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  8439. ctx->n_sample++;
  8440. }
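// A minimal caller sketch, assuming the llama_beams_state / llama_beam_view definitions from
// llama.h; `model` stands for the caller's llama_model pointer. The callback may end a beam by
// setting eob = true, e.g. when it sees the EOS token:
//
//     static void beam_cb(void * data, llama_beams_state state) {
//         const llama_token tok_eos = *(const llama_token *) data;
//         for (size_t i = 0; i < state.n_beams; ++i) {
//             llama_beam_view & bv = state.beam_views[i];
//             if (!bv.eob && bv.n_tokens > 0 && bv.tokens[bv.n_tokens - 1] == tok_eos) {
//                 bv.eob = true;
//             }
//         }
//     }
//     llama_token tok_eos = llama_token_eos(model);
//     llama_beam_search(ctx, beam_cb, &tok_eos, /*n_beams =*/ 4, n_past, /*n_predict =*/ 64);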
  8441. //
  8442. // quantization
  8443. //
  8444. struct quantize_state_internal {
  8445. const llama_model & model;
  8446. const llama_model_quantize_params * params;
  8447. int n_attention_wv = 0;
  8448. int n_ffn_down = 0;
  8449. int n_ffn_gate = 0;
  8450. int n_ffn_up = 0;
  8451. int i_attention_wv = 0;
  8452. int i_ffn_down = 0;
  8453. int i_ffn_gate = 0;
  8454. int i_ffn_up = 0;
  8455. int n_k_quantized = 0;
  8456. int n_fallback = 0;
  8457. bool has_imatrix = false;
  8458. quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
  8459. : model(model)
  8460. , params(params)
  8461. {}
  8462. };
  8463. static void llama_convert_tensor_internal(
  8464. struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
  8465. const size_t nelements, const int nthread
  8466. ) {
  8467. if (output.size() < nelements) {
  8468. output.resize(nelements);
  8469. }
  8470. float * f32_output = (float *) output.data();
  8471. ggml_type_traits_t qtype;
  8472. if (ggml_is_quantized(tensor->type)) {
  8473. qtype = ggml_internal_get_type_traits(tensor->type);
  8474. if (qtype.to_float == NULL) {
  8475. throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
  8476. }
  8477. } else if (tensor->type != GGML_TYPE_F16) {
  8478. throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
  8479. }
  8480. if (nthread < 2) {
  8481. if (tensor->type == GGML_TYPE_F16) {
  8482. ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
  8483. } else if (ggml_is_quantized(tensor->type)) {
  8484. qtype.to_float(tensor->data, f32_output, nelements);
  8485. } else {
  8486. GGML_ASSERT(false); // unreachable
  8487. }
  8488. return;
  8489. }
  8490. size_t block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
  8491. size_t block_size_bytes = ggml_type_size(tensor->type);
  8492. GGML_ASSERT(nelements % block_size == 0);
  8493. size_t nblocks = nelements / block_size;
  8494. size_t blocks_per_thread = nblocks / nthread;
  8495. size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
  8496. size_t in_buff_offs = 0;
  8497. size_t out_buff_offs = 0;
  8498. for (int tnum = 0; tnum < nthread; tnum++) {
  8499. size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
  8500. size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
  8501. size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
  8502. auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
  8503. if (typ == GGML_TYPE_F16) {
  8504. ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
  8505. } else {
  8506. qtype.to_float(inbuf, outbuf, nels);
  8507. }
  8508. };
  8509. workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
  8510. in_buff_offs += thr_block_bytes;
  8511. out_buff_offs += thr_elems;
  8512. }
  8513. for (auto & w : workers) { w.join(); }
  8514. workers.clear();
  8515. }
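// Worked example of the block partitioning above: a quantized tensor with nelements = 8192 and
// block_size = 32 has nblocks = 256; with nthread = 3, blocks_per_thread = 85 and spare_blocks = 1,
// so the workers dequantize 85, 85 and 86 blocks respectively (the last thread absorbs the
// remainder).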
  8516. static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
  8517. const std::string name = ggml_get_name(tensor);
  8518. // TODO: avoid hardcoded tensor names - use the TN_* constants
  8519. const llm_arch arch = qs.model.arch;
  8520. const auto tn = LLM_TN(arch);
  8521. auto use_more_bits = [](int i_layer, int num_layers) -> bool {
  8522. return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
  8523. };
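// For a 32-layer model, use_more_bits() is true for layers 0-3, 28-31 and every third layer
// starting at layer 6 (6, 9, ..., 27), i.e. roughly half of the eligible tensors get the larger
// quantization type.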
  8524. const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
  8525. auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
  8526. if (n_expert > 1) {
8527. // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
  8528. // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
  8529. // for getting the current layer as I initially thought, and we need to resort to parsing the
  8530. // tensor name.
  8531. n_layer /= n_expert;
  8532. if (sscanf(name, "blk.%d.", &i_layer) != 1) {
  8533. throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
  8534. }
  8535. if (i_layer < 0 || i_layer >= n_layer) {
  8536. throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
  8537. }
  8538. }
  8539. return std::make_pair(i_layer, n_layer);
  8540. };
  8541. if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
  8542. int nx = tensor->ne[0];
  8543. if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
  8544. new_type = GGML_TYPE_Q8_0;
  8545. }
  8546. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS) {
  8547. new_type = GGML_TYPE_Q5_K;
  8548. }
  8549. else if (new_type != GGML_TYPE_Q8_0) {
  8550. new_type = GGML_TYPE_Q6_K;
  8551. }
  8552. } else if (name == "token_embd.weight") {
  8553. if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS) {
  8554. new_type = GGML_TYPE_Q2_K;
  8555. }
  8556. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
  8557. new_type = GGML_TYPE_Q4_K;
  8558. }
  8559. } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS) {
  8560. if (name.find("attn_v.weight") != std::string::npos) {
  8561. if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
  8562. else new_type = GGML_TYPE_Q2_K;
  8563. ++qs.i_attention_wv;
  8564. }
  8565. else if (name.find("ffn_down") != std::string::npos) {
  8566. if (qs.i_ffn_down < qs.n_ffn_down/8) new_type = GGML_TYPE_Q2_K;
  8567. ++qs.i_ffn_down;
  8568. }
  8569. } else if (name.find("attn_v.weight") != std::string::npos) {
  8570. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
  8571. new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
  8572. }
  8573. else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
  8574. new_type = GGML_TYPE_Q4_K;
  8575. }
  8576. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
  8577. new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_Q3_K : GGML_TYPE_IQ3_XXS;
  8578. }
  8579. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  8580. new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  8581. }
  8582. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  8583. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  8584. use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
  8585. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
  8586. else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
  8587. (qs.i_attention_wv < qs.n_attention_wv/8 || qs.i_attention_wv >= 7*qs.n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
  8588. if (qs.model.type == MODEL_70B) {
  8589. // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
  8590. // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
  8591. // nearly negligible increase in model size by quantizing this tensor with more bits:
  8592. if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
  8593. }
  8594. if (qs.model.hparams.n_expert == 8) {
  8595. // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
  8596. // TODO: explore better strategies
  8597. new_type = GGML_TYPE_Q8_0;
  8598. }
  8599. ++qs.i_attention_wv;
  8600. } else if (name.find("attn_k.weight") != std::string::npos) {
  8601. if (qs.model.hparams.n_expert == 8) {
  8602. // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
  8603. // TODO: explore better strategies
  8604. new_type = GGML_TYPE_Q8_0;
  8605. }
  8606. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS) {
  8607. new_type = GGML_TYPE_Q2_K;
  8608. }
  8609. } else if (name.find("ffn_down") != std::string::npos) {
  8610. auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
  8611. int i_layer = info.first, n_layer = info.second;
  8612. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  8613. else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS) {
  8614. if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
  8615. }
  8616. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
  8617. new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
  8618. }
  8619. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  8620. new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
  8621. : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
  8622. : GGML_TYPE_Q3_K;
  8623. }
  8624. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
  8625. new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
  8626. }
  8627. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
  8628. if (arch == LLM_ARCH_FALCON) {
  8629. new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
  8630. use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  8631. } else {
  8632. if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
  8633. }
  8634. }
  8635. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
  8636. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
  8637. new_type = GGML_TYPE_Q5_K;
  8638. }
  8639. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
  8640. && qs.has_imatrix && i_layer < n_layer/8) {
  8641. // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
  8642. // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
  8643. // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
  8644. new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
  8645. }
  8646. ++qs.i_ffn_down;
  8647. } else if (name.find("attn_output.weight") != std::string::npos) {
  8648. if (arch != LLM_ARCH_FALCON) {
  8649. if (qs.model.hparams.n_expert == 8) {
  8650. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
  8651. ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ||
  8652. ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
  8653. new_type = GGML_TYPE_Q5_K;
  8654. }
  8655. } else {
  8656. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
  8657. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_Q3_K;
  8658. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) new_type = GGML_TYPE_Q4_K;
  8659. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  8660. }
  8661. } else {
  8662. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  8663. }
  8664. }
  8665. else if (name.find("attn_qkv.weight") != std::string::npos) {
  8666. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  8667. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
  8668. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
  8669. }
  8670. else if (name.find("ffn_gate") != std::string::npos) {
  8671. auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
  8672. int i_layer = info.first, n_layer = info.second;
  8673. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS && !use_more_bits(i_layer, n_layer)) {
  8674. new_type = GGML_TYPE_Q2_K;
  8675. }
  8676. ++qs.i_ffn_gate;
  8677. }
  8678. else if (name.find("ffn_up") != std::string::npos) {
  8679. auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
  8680. int i_layer = info.first, n_layer = info.second;
  8681. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS && !use_more_bits(i_layer, n_layer)) {
  8682. new_type = GGML_TYPE_Q2_K;
  8683. }
  8684. ++qs.i_ffn_up;
  8685. }
  8686. // if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  8687. //}
  8688. // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
  8689. //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
  8690. // if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  8691. //}
  8692. // This can be used to reduce the size of the Q5_K_S model.
  8693. // The associated PPL increase is fully in line with the size reduction
  8694. //else {
  8695. // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
  8696. //}
  8697. bool convert_incompatible_tensor = false;
  8698. if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
  8699. new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K ||
  8700. new_type == GGML_TYPE_IQ2_XS || new_type == GGML_TYPE_IQ2_XXS ||
  8701. new_type == GGML_TYPE_IQ3_XXS) {
  8702. int nx = tensor->ne[0];
  8703. int ny = tensor->ne[1];
  8704. if (nx % QK_K != 0) {
  8705. LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
  8706. convert_incompatible_tensor = true;
  8707. } else {
  8708. ++qs.n_k_quantized;
  8709. }
  8710. }
  8711. if (convert_incompatible_tensor) {
  8712. switch (new_type) {
  8713. case GGML_TYPE_IQ2_XXS:
  8714. case GGML_TYPE_IQ2_XS:
  8715. case GGML_TYPE_IQ3_XXS:
  8716. case GGML_TYPE_Q2_K: new_type = GGML_TYPE_Q4_0; break;
  8717. case GGML_TYPE_Q3_K: new_type = GGML_TYPE_Q4_1; break;
  8718. case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
  8719. case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
  8720. case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
  8721. default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
  8722. }
  8723. LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
  8724. ++qs.n_fallback;
  8725. }
  8726. return new_type;
  8727. }
  8728. static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
  8729. ggml_type quantized_type;
  8730. llama_ftype ftype = params->ftype;
  8731. switch (params->ftype) {
  8732. case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
  8733. case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
  8734. case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
  8735. case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
  8736. case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
  8737. case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
  8738. case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
  8739. // K-quants
  8740. case LLAMA_FTYPE_MOSTLY_Q2_K_S:
  8741. case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
  8742. case LLAMA_FTYPE_MOSTLY_Q3_K_XS:
  8743. case LLAMA_FTYPE_MOSTLY_Q3_K_S:
  8744. case LLAMA_FTYPE_MOSTLY_Q3_K_M:
  8745. case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
  8746. case LLAMA_FTYPE_MOSTLY_Q4_K_S:
  8747. case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
  8748. case LLAMA_FTYPE_MOSTLY_Q5_K_S:
  8749. case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
  8750. case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
  8751. case LLAMA_FTYPE_MOSTLY_IQ2_XXS: quantized_type = GGML_TYPE_IQ2_XXS; break;
  8752. case LLAMA_FTYPE_MOSTLY_IQ2_XS: quantized_type = GGML_TYPE_IQ2_XS; break;
  8753. case LLAMA_FTYPE_MOSTLY_IQ3_XXS: quantized_type = GGML_TYPE_IQ3_XXS; break;
  8754. default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
  8755. }
  8756. int nthread = params->nthread;
  8757. if (nthread <= 0) {
  8758. nthread = std::thread::hardware_concurrency();
  8759. }
8760. // mmap consistently increases speed on Linux, and also increases speed on Windows with
  8761. // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
  8762. #if defined(__linux__) || defined(_WIN32)
  8763. constexpr bool use_mmap = true;
  8764. #else
  8765. constexpr bool use_mmap = false;
  8766. #endif
  8767. llama_model_loader ml(fname_inp, use_mmap, NULL);
  8768. ml.init_mapping(false); // no prefetching?
  8769. llama_model model;
  8770. llm_load_arch(ml, model);
  8771. llm_load_hparams(ml, model);
  8772. struct quantize_state_internal qs(model, params);
  8773. if (params->only_copy) {
  8774. ftype = model.ftype;
  8775. }
  8776. const std::unordered_map<std::string, std::vector<float>> * imatrix_data = nullptr;
  8777. if (params->imatrix) {
  8778. imatrix_data = static_cast<const std::unordered_map<std::string, std::vector<float>>*>(params->imatrix);
  8779. if (imatrix_data) {
  8780. LLAMA_LOG_INFO("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
  8781. qs.has_imatrix = true;
  8782. }
  8783. }
  8784. const size_t align = GGUF_DEFAULT_ALIGNMENT;
  8785. struct gguf_context * ctx_out = gguf_init_empty();
  8786. // copy the KV pairs from the input file
  8787. gguf_set_kv (ctx_out, ml.ctx_gguf);
  8788. gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
  8789. gguf_set_val_u32(ctx_out, "general.file_type", ftype);
  8790. for (int i = 0; i < ml.n_tensors; ++i) {
  8791. struct ggml_tensor * meta = ml.get_tensor_meta(i);
  8792. const std::string name = ggml_get_name(meta);
  8793. // TODO: avoid hardcoded tensor names - use the TN_* constants
  8794. if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
  8795. ++qs.n_attention_wv;
  8796. }
  8797. else if (name.find("ffn_down") != std::string::npos) {
  8798. ++qs.n_ffn_down;
  8799. }
  8800. else if (name.find("ffn_gate") != std::string::npos) {
  8801. ++qs.n_ffn_gate;
  8802. }
  8803. else if (name.find("ffn_up") != std::string::npos) {
  8804. ++qs.n_ffn_up;
  8805. }
  8806. }
  8807. if (qs.n_attention_wv != qs.n_ffn_down || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) {
  8808. LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_ffn_down = %d, hparams.n_layer = %d\n",
  8809. __func__, qs.n_attention_wv, qs.n_ffn_down, model.hparams.n_layer);
  8810. }
  8811. size_t total_size_org = 0;
  8812. size_t total_size_new = 0;
  8813. std::vector<int64_t> hist_all(1 << 4, 0);
  8814. std::vector<std::thread> workers;
  8815. workers.reserve(nthread);
  8816. std::mutex mutex;
  8817. int idx = 0;
  8818. std::vector<no_init<uint8_t>> read_data;
  8819. std::vector<no_init<uint8_t>> work;
  8820. std::vector<no_init<float>> f32_conv_buf;
8821. // populate the original tensors so we have the initial metadata in place
  8822. for (int i = 0; i < ml.n_tensors; ++i) {
  8823. struct ggml_tensor * meta = ml.get_tensor_meta(i);
  8824. gguf_add_tensor(ctx_out, meta);
  8825. }
  8826. std::ofstream fout(fname_out, std::ios::binary);
  8827. fout.exceptions(std::ofstream::failbit); // fail fast on write errors
  8828. const size_t meta_size = gguf_get_meta_size(ctx_out);
  8829. LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size);
  8830. // placeholder for the meta data
  8831. ::zeros(fout, meta_size);
  8832. for (int i = 0; i < ml.n_tensors; ++i) {
  8833. struct ggml_tensor * tensor = ml.get_tensor_meta(i);
  8834. const std::string name = ggml_get_name(tensor);
  8835. if (!ml.use_mmap) {
  8836. if (read_data.size() < ggml_nbytes(tensor)) {
  8837. read_data.resize(ggml_nbytes(tensor));
  8838. }
  8839. tensor->data = read_data.data();
  8840. }
  8841. ml.load_data_for(tensor);
  8842. LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
  8843. ++idx, ml.n_tensors,
  8844. ggml_get_name(tensor),
  8845. llama_format_tensor_shape(tensor).c_str(),
  8846. ggml_type_name(tensor->type));
  8847. // This used to be a regex, but <regex> has an extreme cost to compile times.
  8848. bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
  8849. // quantize only 2D tensors
  8850. quantize &= (ggml_n_dims(tensor) == 2);
  8851. quantize &= params->quantize_output_tensor || name != "output.weight";
  8852. quantize &= !params->only_copy;
  8853. // do not quantize expert gating tensors
  8854. quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
  8855. // do not quantize positional embeddings and token types (BERT)
  8856. quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
  8857. quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");
  8858. enum ggml_type new_type;
  8859. void * new_data;
  8860. size_t new_size;
  8861. if (quantize) {
  8862. new_type = quantized_type;
  8863. if (!params->pure) {
  8864. new_type = get_k_quant_type(qs, new_type, tensor, ftype);
  8865. }
  8866. // If we've decided to quantize to the same type the tensor is already
  8867. // in then there's nothing to do.
  8868. quantize = tensor->type != new_type;
  8869. }
  8870. if (!quantize) {
  8871. new_type = tensor->type;
  8872. new_data = tensor->data;
  8873. new_size = ggml_nbytes(tensor);
  8874. LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
  8875. } else {
  8876. const size_t nelements = ggml_nelements(tensor);
  8877. const float * imatrix = nullptr;
  8878. if (imatrix_data) {
  8879. auto it = imatrix_data->find(tensor->name);
  8880. if (it == imatrix_data->end()) {
  8881. LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
  8882. } else {
  8883. if (it->second.size() == (size_t)tensor->ne[0]) {
  8884. imatrix = it->second.data();
  8885. } else {
  8886. LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
  8887. int(it->second.size()), int(tensor->ne[0]), tensor->name);
  8888. }
  8889. }
  8890. }
  8891. if ((new_type == GGML_TYPE_IQ2_XXS ||
  8892. new_type == GGML_TYPE_IQ2_XS ||
  8893. (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
  8894. LLAMA_LOG_ERROR("\n\n============================================================\n");
  8895. LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
  8896. LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
  8897. LLAMA_LOG_ERROR("============================================================\n\n");
  8898. throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
  8899. }
  8900. float * f32_data;
  8901. if (tensor->type == GGML_TYPE_F32) {
  8902. f32_data = (float *) tensor->data;
  8903. } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
  8904. throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
  8905. } else {
  8906. llama_convert_tensor_internal(tensor, f32_conv_buf, workers, nelements, nthread);
  8907. f32_data = (float *) f32_conv_buf.data();
  8908. }
  8909. LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
  8910. fflush(stdout);
  8911. if (work.size() < nelements * 4) {
  8912. work.resize(nelements * 4); // upper bound on size
  8913. }
  8914. new_data = work.data();
  8915. std::array<int64_t, 1 << 4> hist_cur = {};
  8916. const int n_per_row = tensor->ne[0];
  8917. const int nrows = nelements / n_per_row;
  8918. static const int min_chunk_size = 32 * 512;
  8919. const int chunk_size = n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row);
  8920. const int nchunk = (nelements + chunk_size - 1)/chunk_size;
  8921. const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
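// Worked example: for n_per_row = 4096 and nelements = 4096 * 4096, chunk_size rounds up to
// 4 full rows = 16384 elements, nchunk = 1024, and with nthread = 8 all 8 threads are used;
// each chunk then covers nrows_per_chunk = 4 rows in the worker below.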
  8922. if (nthread_use < 2) {
  8923. new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, hist_cur.data(), imatrix);
  8924. } else {
  8925. int counter = 0;
  8926. new_size = 0;
  8927. auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, chunk_size,
  8928. nrows, n_per_row, imatrix]() {
  8929. std::array<int64_t, 1 << 4> local_hist = {};
  8930. const int nrows_per_chunk = chunk_size / n_per_row;
  8931. size_t local_size = 0;
  8932. while (true) {
  8933. std::unique_lock<std::mutex> lock(mutex);
  8934. int first_row = counter; counter += nrows_per_chunk;
  8935. if (first_row >= nrows) {
  8936. if (local_size > 0) {
  8937. for (int j=0; j<int(local_hist.size()); ++j) {
  8938. hist_cur[j] += local_hist[j];
  8939. }
  8940. new_size += local_size;
  8941. }
  8942. break;
  8943. }
  8944. lock.unlock();
  8945. const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
  8946. local_size += ggml_quantize_chunk(new_type, f32_data, new_data,
  8947. first_row * n_per_row, this_nrow, n_per_row, local_hist.data(), imatrix);
  8948. }
  8949. };
  8950. for (int it = 0; it < nthread_use - 1; ++it) {
  8951. workers.emplace_back(compute);
  8952. }
  8953. compute();
  8954. for (auto & w : workers) { w.join(); }
  8955. workers.clear();
  8956. }
  8957. LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
  8958. int64_t tot_count = 0;
  8959. for (size_t i = 0; i < hist_cur.size(); i++) {
  8960. hist_all[i] += hist_cur[i];
  8961. tot_count += hist_cur[i];
  8962. }
  8963. if (tot_count > 0) {
  8964. LLAMA_LOG_INFO(" | hist: ");
  8965. for (size_t i = 0; i < hist_cur.size(); i++) {
  8966. LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
  8967. }
  8968. }
  8969. LLAMA_LOG_INFO("\n");
  8970. }
  8971. total_size_org += ggml_nbytes(tensor);
  8972. total_size_new += new_size;
  8973. // update the gguf meta data as we go
  8974. gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
  8975. gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
  8976. // write tensor data + padding
  8977. fout.write((const char *) new_data, new_size);
  8978. zeros(fout, GGML_PAD(new_size, align) - new_size);
  8979. }
  8980. // go back to beginning of file and write the updated meta data
  8981. {
  8982. fout.seekp(0);
  8983. std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
  8984. gguf_get_meta_data(ctx_out, data.data());
  8985. fout.write((const char *) data.data(), data.size());
  8986. }
  8987. fout.close();
  8988. gguf_free(ctx_out);
  8989. LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
  8990. LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
  8991. // print histogram for all tensors
  8992. {
  8993. int64_t sum_all = 0;
  8994. for (size_t i = 0; i < hist_all.size(); i++) {
  8995. sum_all += hist_all[i];
  8996. }
  8997. if (sum_all > 0) {
  8998. LLAMA_LOG_INFO("%s: hist: ", __func__);
  8999. for (size_t i = 0; i < hist_all.size(); i++) {
  9000. LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
  9001. }
  9002. LLAMA_LOG_INFO("\n");
  9003. }
  9004. }
  9005. if (qs.n_fallback > 0) {
  9006. LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n",
  9007. __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
  9008. }
  9009. }
  9010. static int llama_apply_lora_from_file_internal(
  9011. const struct llama_model & model, const char * path_lora, float scale, const char * path_base_model, int n_threads
  9012. ) {
  9013. LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
  9014. const int64_t t_start_lora_us = ggml_time_us();
  9015. llama_file fin(path_lora, "rb");
  9016. // verify magic and version
  9017. {
  9018. uint32_t magic = fin.read_u32();
  9019. if (magic != LLAMA_FILE_MAGIC_GGLA) {
  9020. LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
  9021. return 1;
  9022. }
  9023. uint32_t format_version = fin.read_u32();
  9024. if (format_version != 1) {
  9025. LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
  9026. return 1;
  9027. }
  9028. }
  9029. int32_t lora_r = fin.read_u32();
  9030. int32_t lora_alpha = fin.read_u32();
  9031. float scaling = scale * (float)lora_alpha / (float)lora_r;
  9032. LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
  9033. // load base model
  9034. std::unique_ptr<llama_model_loader> ml;
  9035. if (path_base_model) {
  9036. LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
  9037. ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr));
  9038. ml->init_mapping(/*prefetch*/ false); // no prefetching
  9039. }
  9040. struct tensor_meta {
  9041. std::string name;
  9042. ggml_type type;
  9043. int32_t ne[2];
  9044. size_t offset;
  9045. };
  9046. std::map<std::string, tensor_meta> tensor_meta_map;
  9047. // load all tensor meta
  9048. while (true) {
  9049. if (fin.tell() == fin.size) {
  9050. // eof
  9051. break;
  9052. }
  9053. int32_t n_dims;
  9054. int32_t name_len;
  9055. int32_t ftype;
  9056. fin.read_raw(&n_dims, sizeof(n_dims));
  9057. fin.read_raw(&name_len, sizeof(name_len));
  9058. fin.read_raw(&ftype, sizeof(ftype));
  9059. if (n_dims != 1 && n_dims != 2) {
  9060. LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
  9061. return 1;
  9062. }
  9063. int32_t ne[2] = { 1, 1 };
  9064. for (int i = 0; i < n_dims; ++i) {
  9065. fin.read_raw(&ne[i], sizeof(ne[i]));
  9066. }
  9067. std::string name;
  9068. {
  9069. GGML_ASSERT(name_len < GGML_MAX_NAME);
  9070. char buf[GGML_MAX_NAME];
  9071. fin.read_raw(buf, name_len);
  9072. name = std::string(buf, name_len);
  9073. }
  9074. // check for lora suffix
  9075. std::string lora_suffix;
  9076. if (name.length() > 6) {
  9077. lora_suffix = name.substr(name.length() - 6);
  9078. }
  9079. if (lora_suffix != ".loraA" && lora_suffix != ".loraB") {
  9080. LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
  9081. return 1;
  9082. }
  9083. // tensor type
  9084. ggml_type wtype;
switch (ftype) {
    case 0: wtype = GGML_TYPE_F32; break;
    case 1: wtype = GGML_TYPE_F16; break;
    default:
        {
            LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
                    __func__, ftype);
            return 1; // report failure consistently with the other error paths (this function returns int)
        }
}
  9095. // data offset
  9096. size_t offset = fin.tell();
  9097. offset = (offset + 31) & -32;
  9098. // skip tensor data
  9099. fin.seek(offset + ggml_row_size(wtype, ne[0]) * ne[1], SEEK_SET);
  9100. tensor_meta_map.emplace(name, tensor_meta{ name, wtype, { ne[0], ne[1] }, offset });
  9101. }
  9102. bool warned = false;
  9103. int n_tensors = 0;
  9104. // apply
  9105. ggml_backend_t backend_cpu = ggml_backend_cpu_init();
  9106. if (backend_cpu == nullptr) {
  9107. LLAMA_LOG_ERROR("%s: error: failed to initialize cpu backend\n", __func__);
  9108. return 1;
  9109. }
  9110. ggml_backend_cpu_set_n_threads(backend_cpu, n_threads);
  9111. std::vector<no_init<uint8_t>> read_buf;
  9112. for (const auto & it : model.tensors_by_name) {
  9113. const std::string & base_name = it.first;
  9114. ggml_tensor * model_t = it.second;
  9115. if (tensor_meta_map.find(base_name + ".loraA") == tensor_meta_map.end() ||
  9116. tensor_meta_map.find(base_name + ".loraB") == tensor_meta_map.end()) {
  9117. continue;
  9118. }
  9119. tensor_meta & metaA = tensor_meta_map.at(base_name + ".loraA");
  9120. tensor_meta & metaB = tensor_meta_map.at(base_name + ".loraB");
  9121. ggml_init_params lora_init_params = {
  9122. /* .mem_size */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
  9123. /* .mem_buffer */ nullptr,
  9124. /* .no_alloc */ true,
  9125. };
  9126. ggml_context * lora_ctx = ggml_init(lora_init_params);
  9127. if (lora_ctx == nullptr) {
  9128. LLAMA_LOG_ERROR("%s: error: failed to initialize lora context\n", __func__);
  9129. ggml_backend_free(backend_cpu);
  9130. return 1;
  9131. }
  9132. // create tensors
  9133. ggml_tensor * loraA = ggml_new_tensor_2d(lora_ctx, metaA.type, metaA.ne[0], metaA.ne[1]);
  9134. ggml_tensor * loraB = ggml_new_tensor_2d(lora_ctx, metaB.type, metaB.ne[0], metaB.ne[1]);
  9135. ggml_set_name(loraA, metaA.name.c_str());
  9136. ggml_set_name(loraB, metaB.name.c_str());
  9137. ggml_tensor * base_t;
  9138. if (ml) {
  9139. if (gguf_find_tensor(ml->ctx_gguf, base_name.c_str()) < 0) {
  9140. LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
  9141. return 1;
  9142. }
  9143. base_t = ggml_dup_tensor(lora_ctx, ml->get_tensor_meta(base_name.c_str()));
  9144. } else {
  9145. base_t = ggml_dup_tensor(lora_ctx, model_t);
  9146. }
  9147. ggml_set_name(base_t, base_name.c_str());
  9148. // allocate in backend buffer
  9149. ggml_backend_buffer_t lora_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type());
  9150. if (lora_buf == nullptr) {
  9151. LLAMA_LOG_ERROR("%s: error: failed to allocate lora tensors\n", __func__);
  9152. return 1;
  9153. }
  9154. // load tensor data
  9155. auto load_tensor = [&read_buf, &fin](const tensor_meta & tensor_meta, ggml_tensor * tensor) {
  9156. read_buf.resize(ggml_nbytes(tensor));
  9157. fin.seek(tensor_meta.offset, SEEK_SET);
  9158. fin.read_raw(read_buf.data(), ggml_nbytes(tensor));
  9159. ggml_backend_tensor_set(tensor, read_buf.data(), 0, read_buf.size());
  9160. };
  9161. load_tensor(metaA, loraA);
  9162. load_tensor(metaB, loraB);
  9163. // load base model tensor data
  9164. if (ml) {
  9165. ml->load_data_for(base_t);
  9166. } else {
  9167. ggml_backend_tensor_copy(model_t, base_t);
  9168. }
  9169. if (ggml_is_quantized(base_t->type) && !warned) {
  9170. LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
  9171. "use a f16 or f32 base model with --lora-base\n", __func__);
  9172. warned = true;
  9173. }
  9174. if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
  9175. LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
  9176. " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
  9177. ggml_free(lora_ctx);
  9178. ggml_backend_buffer_free(lora_buf);
  9179. ggml_backend_free(backend_cpu);
  9180. return 1;
  9181. }
  9182. auto build_lora_graph = [&]() {
  9183. // w = w + BA*s
  9184. ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
  9185. ggml_set_name(BA, "BA");
  9186. if (scaling != 1.0f) {
  9187. BA = ggml_scale(lora_ctx, BA, scaling);
  9188. ggml_set_name(BA, "BA_scaled");
  9189. }
  9190. ggml_tensor * r;
  9191. r = ggml_add_inplace(lora_ctx, base_t, BA);
  9192. ggml_set_name(r, "r_add");
  9193. if (base_t->type != model_t->type) {
  9194. // convert the result to the model type
  9195. r = ggml_cast(lora_ctx, r, model_t->type);
  9196. ggml_set_name(r, "r_cast");
  9197. }
  9198. return r;
  9199. };
  9200. ggml_cgraph * gf = ggml_new_graph(lora_ctx);
  9201. ggml_tensor * r = build_lora_graph();
  9202. ggml_build_forward_expand(gf, r);
  9203. ggml_backend_buffer_t graph_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type());
  9204. if (graph_buf == nullptr) {
  9205. LLAMA_LOG_ERROR("%s: error: failed to allocate graph tensors\n", __func__);
  9206. ggml_free(lora_ctx);
  9207. ggml_backend_buffer_free(lora_buf);
  9208. ggml_backend_free(backend_cpu);
  9209. return 1;
  9210. }
  9211. ggml_backend_graph_compute(backend_cpu, gf);
  9212. ggml_backend_tensor_set(model_t, r->data, 0, ggml_nbytes(r));
#if 0
    // TODO: use scheduler with fallback to CPU for less copies between CPU and GPU
    //ggml_backend_sched_t sched = ggml_backend_sched_new(backends.data(), backends.size(), GGML_DEFAULT_GRAPH_SIZE);
    // sched compute
    ggml_build_forward_expand(gf, build_lora_graph());
    ggml_backend_sched_init_measure(sched, gf);
    // create the graph again, since the previous one was destroyed by the measure
    ggml_graph_clear(gf);
    ggml_build_forward_expand(gf, build_lora_graph());
    ggml_backend_sched_graph_compute(sched, gf);
    ggml_backend_sched_free(sched);
#endif
  9225. ggml_backend_buffer_free(lora_buf);
  9226. ggml_backend_buffer_free(graph_buf);
  9227. ggml_free(lora_ctx);
  9228. n_tensors++;
  9229. if (n_tensors % 4 == 0) {
  9230. LLAMA_LOG_INFO(".");
  9231. }
  9232. }
  9233. ggml_backend_free(backend_cpu);
  9234. const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
  9235. LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
  9236. return 0;
  9237. }
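// Example (sketch): applying a LoRA adapter through the public wrapper defined further
// below. The file paths, scale and thread count are placeholders; passing an f16/f32
// base model is optional, but avoids the quantized-base quality warning above.
#if 0
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("models/7B/ggml-model-q4_0.gguf", mparams);

    const int err = llama_model_apply_lora_from_file(
        model, "adapters/lora-adapter.bin", /*scale*/ 1.0f,
        "models/7B/ggml-model-f16.gguf", /*n_threads*/ 4);
    if (err != 0) {
        // the adapter could not be applied
    }
#endif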
  9238. //
  9239. // interface implementation
  9240. //
  9241. struct llama_model_params llama_model_default_params() {
  9242. struct llama_model_params result = {
  9243. /*.n_gpu_layers =*/ 0,
  9244. /*.split_mode =*/ LLAMA_SPLIT_LAYER,
  9245. /*.main_gpu =*/ 0,
  9246. /*.tensor_split =*/ nullptr,
  9247. /*.progress_callback =*/ nullptr,
  9248. /*.progress_callback_user_data =*/ nullptr,
  9249. /*.kv_overrides =*/ nullptr,
  9250. /*.vocab_only =*/ false,
  9251. /*.use_mmap =*/ true,
  9252. /*.use_mlock =*/ false,
  9253. };
  9254. #ifdef GGML_USE_METAL
  9255. // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
  9256. result.n_gpu_layers = 999;
  9257. #endif
  9258. return result;
  9259. }
  9260. struct llama_context_params llama_context_default_params() {
  9261. struct llama_context_params result = {
  9262. /*.seed =*/ LLAMA_DEFAULT_SEED,
  9263. /*.n_ctx =*/ 512,
  9264. /*.n_batch =*/ 512,
  9265. /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
  9266. /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
  9267. /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_UNSPECIFIED,
  9268. /*.rope_freq_base =*/ 0.0f,
  9269. /*.rope_freq_scale =*/ 0.0f,
  9270. /*.yarn_ext_factor =*/ -1.0f,
  9271. /*.yarn_attn_factor =*/ 1.0f,
  9272. /*.yarn_beta_fast =*/ 32.0f,
  9273. /*.yarn_beta_slow =*/ 1.0f,
  9274. /*.yarn_orig_ctx =*/ 0,
  9275. /*.cb_eval =*/ nullptr,
  9276. /*.cb_eval_user_data =*/ nullptr,
  9277. /*.type_k =*/ GGML_TYPE_F16,
  9278. /*.type_v =*/ GGML_TYPE_F16,
  9279. /*.mul_mat_q =*/ true,
  9280. /*.logits_all =*/ false,
  9281. /*.embedding =*/ false,
  9282. /*.offload_kqv =*/ true,
  9283. /*.do_pooling =*/ true,
  9284. };
  9285. return result;
  9286. }
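// Example (sketch): the intended way to use the defaults above - start from
// llama_context_default_params() and override only what you need. The concrete values
// and the previously loaded `model` are illustrative assumptions.
#if 0
    llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx     = 2048; // larger context than the 512 default
    cparams.n_threads = 8;    // generation threads
    cparams.seed      = 1234; // fixed seed for reproducible sampling

    llama_context * lctx = llama_new_context_with_model(model, cparams);
#endif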
  9287. struct llama_model_quantize_params llama_model_quantize_default_params() {
  9288. struct llama_model_quantize_params result = {
  9289. /*.nthread =*/ 0,
  9290. /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
  9291. /*.allow_requantize =*/ false,
  9292. /*.quantize_output_tensor =*/ true,
  9293. /*.only_copy =*/ false,
  9294. /*.pure =*/ false,
  9295. /*.imatrix =*/ nullptr,
  9296. };
  9297. return result;
  9298. }
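// Example (sketch): quantizing an f16 GGUF file using the defaults above as a base.
// The file names are placeholders; nthread = 0 lets the implementation pick
// std::thread::hardware_concurrency().
#if 0
    llama_model_quantize_params qparams = llama_model_quantize_default_params();
    qparams.ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;

    const uint32_t rc = llama_model_quantize(
        "models/7B/ggml-model-f16.gguf", "models/7B/ggml-model-q4_k_m.gguf", &qparams);
    // rc == 0 on success
#endif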
size_t llama_max_devices(void) {
#if defined(GGML_USE_METAL)
    return 1;
#elif defined(GGML_USE_CUBLAS)
    return GGML_CUDA_MAX_DEVICES;
#elif defined(GGML_USE_SYCL)
    return GGML_SYCL_MAX_DEVICES;
#elif defined(GGML_USE_VULKAN)
    return GGML_VK_MAX_DEVICES;
#else
    return 1;
#endif
}
  9312. bool llama_supports_mmap(void) {
  9313. return llama_mmap::SUPPORTED;
  9314. }
  9315. bool llama_supports_mlock(void) {
  9316. return llama_mlock::SUPPORTED;
  9317. }
  9318. bool llama_supports_gpu_offload(void) {
  9319. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
  9320. defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE)
  9321. // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
  9322. return true;
  9323. #else
  9324. return false;
  9325. #endif
  9326. }
// deprecated:
bool llama_mmap_supported(void) {
    return llama_supports_mmap();
}
bool llama_mlock_supported(void) {
    return llama_supports_mlock();
}
  9334. void llama_backend_init(bool numa) {
  9335. ggml_time_init();
  9336. // needed to initialize f16 tables
  9337. {
  9338. struct ggml_init_params params = { 0, NULL, false };
  9339. struct ggml_context * ctx = ggml_init(params);
  9340. ggml_free(ctx);
  9341. }
  9342. if (numa) {
  9343. ggml_numa_init();
  9344. }
  9345. #ifdef GGML_USE_MPI
  9346. ggml_mpi_backend_init();
  9347. #endif
  9348. }
  9349. void llama_backend_free(void) {
  9350. #ifdef GGML_USE_MPI
  9351. ggml_mpi_backend_free();
  9352. #endif
  9353. ggml_quantize_free();
  9354. }
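// Example (sketch): the usual per-process lifecycle around the two functions above -
// initialize the backend once, do all model/context work in between, then free it.
#if 0
    llama_backend_init(/*numa*/ false);
    // ... load models, create contexts, decode ...
    llama_backend_free();
#endif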
  9355. int64_t llama_time_us(void) {
  9356. return ggml_time_us();
  9357. }
  9358. struct llama_model * llama_load_model_from_file(
  9359. const char * path_model,
  9360. struct llama_model_params params) {
  9361. ggml_time_init();
  9362. llama_model * model = new llama_model;
  9363. unsigned cur_percentage = 0;
  9364. if (params.progress_callback == NULL) {
  9365. params.progress_callback_user_data = &cur_percentage;
  9366. params.progress_callback = [](float progress, void * ctx) {
  9367. unsigned * cur_percentage_p = (unsigned *) ctx;
  9368. unsigned percentage = (unsigned) (100 * progress);
  9369. while (percentage > *cur_percentage_p) {
  9370. *cur_percentage_p = percentage;
  9371. LLAMA_LOG_INFO(".");
  9372. if (percentage >= 100) {
  9373. LLAMA_LOG_INFO("\n");
  9374. }
  9375. }
  9376. return true;
  9377. };
  9378. }
  9379. int status = llama_model_load(path_model, *model, params);
  9380. GGML_ASSERT(status <= 0);
  9381. if (status < 0) {
  9382. if (status == -1) {
  9383. LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
  9384. } else if (status == -2) {
  9385. LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
  9386. }
  9387. delete model;
  9388. return nullptr;
  9389. }
  9390. return model;
  9391. }
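// Example (sketch): supplying a custom progress callback instead of the default
// dot-printer installed above. Returning false from the callback cancels the load
// (reported as status -2, "cancelled model load"). The model path is a placeholder.
#if 0
    llama_model_params mparams = llama_model_default_params();
    mparams.progress_callback = [](float progress, void * user_data) -> bool {
        (void) user_data;
        fprintf(stderr, "\rloading: %3d%%", (int) (100.0f * progress));
        return true; // return false here to cancel the load
    };
    llama_model * model = llama_load_model_from_file("models/7B/ggml-model-q4_0.gguf", mparams);
#endif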
  9392. void llama_free_model(struct llama_model * model) {
  9393. delete model;
  9394. }
  9395. struct llama_context * llama_new_context_with_model(
  9396. struct llama_model * model,
  9397. struct llama_context_params params) {
  9398. if (!model) {
  9399. return nullptr;
  9400. }
  9401. llama_context * ctx = new llama_context(*model);
  9402. const auto & hparams = model->hparams;
  9403. auto & cparams = ctx->cparams;
  9404. cparams.n_batch = params.n_batch;
  9405. cparams.n_threads = params.n_threads;
  9406. cparams.n_threads_batch = params.n_threads_batch;
  9407. cparams.yarn_ext_factor = params.yarn_ext_factor;
  9408. cparams.yarn_attn_factor = params.yarn_attn_factor;
  9409. cparams.yarn_beta_fast = params.yarn_beta_fast;
  9410. cparams.yarn_beta_slow = params.yarn_beta_slow;
  9411. cparams.mul_mat_q = params.mul_mat_q;
  9412. cparams.offload_kqv = params.offload_kqv;
  9413. cparams.do_pooling = params.do_pooling;
  9414. cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
  9415. cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
  9416. cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
  9417. cparams.n_yarn_orig_ctx = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
  9418. hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
  9419. hparams.n_ctx_train;
  9420. cparams.cb_eval = params.cb_eval;
  9421. cparams.cb_eval_user_data = params.cb_eval_user_data;
  9422. auto rope_scaling_type = params.rope_scaling_type;
  9423. if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) {
  9424. rope_scaling_type = hparams.rope_scaling_type_train;
  9425. }
  9426. if (rope_scaling_type == LLAMA_ROPE_SCALING_NONE) {
  9427. cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
  9428. }
  9429. if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
  9430. cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 1.0f : 0.0f;
  9431. }
  9432. if (params.seed == LLAMA_DEFAULT_SEED) {
  9433. params.seed = time(NULL);
  9434. }
  9435. LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
  9436. LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
  9437. LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
  9438. ctx->rng = std::mt19937(params.seed);
  9439. ctx->logits_all = params.logits_all;
  9440. const ggml_type type_k = params.type_k;
  9441. const ggml_type type_v = params.type_v;
  9442. GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0);
  9443. GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0);
  9444. if (!hparams.vocab_only) {
  9445. // initialize backends
  9446. #ifdef GGML_USE_METAL
  9447. if (model->n_gpu_layers > 0) {
  9448. ctx->backend_metal = ggml_backend_metal_init();
  9449. if (ctx->backend_metal == nullptr) {
  9450. LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
  9451. llama_free(ctx);
  9452. return nullptr;
  9453. }
  9454. ctx->backends.push_back(ctx->backend_metal);
  9455. }
  9456. #elif defined(GGML_USE_CUBLAS)
  9457. if (model->n_gpu_layers > 0) {
  9458. // with split_mode LLAMA_SPLIT_NONE or LLAMA_SPLIT_ROW, only the main GPU backend is used
  9459. if (model->split_mode == LLAMA_SPLIT_NONE || model->split_mode == LLAMA_SPLIT_ROW) {
  9460. ggml_backend_t backend = ggml_backend_cuda_init(model->main_gpu);
  9461. if (backend == nullptr) {
  9462. LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, model->main_gpu);
  9463. llama_free(ctx);
  9464. return nullptr;
  9465. }
  9466. ctx->backends.push_back(backend);
  9467. } else {
  9468. // LLAMA_SPLIT_LAYER requires a backend for each GPU
  9469. for (int device = 0; device < ggml_backend_cuda_get_device_count(); ++device) {
  9470. ggml_backend_t backend = ggml_backend_cuda_init(device);
  9471. if (backend == nullptr) {
  9472. LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, device);
  9473. llama_free(ctx);
  9474. return nullptr;
  9475. }
  9476. ctx->backends.push_back(backend);
  9477. }
  9478. }
  9479. }
  9480. #elif defined(GGML_USE_VULKAN)
  9481. if (model->n_gpu_layers > 0) {
  9482. for (int device = 0; device < ggml_backend_vk_get_device_count(); ++device) {
  9483. ggml_backend_t backend = ggml_backend_vk_init(device);
  9484. if (backend == nullptr) {
  9485. LLAMA_LOG_ERROR("%s: failed to initialize Vulkan%d backend\n", __func__, device);
  9486. llama_free(ctx);
  9487. return nullptr;
  9488. }
  9489. ctx->backends.push_back(backend);
  9490. }
  9491. }
  9492. #elif defined(GGML_USE_SYCL)
  9493. if (model->n_gpu_layers > 0) {
  9494. ggml_backend_t backend = ggml_backend_sycl_init(model->main_gpu);
  9495. if (backend == nullptr) {
  9496. LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d backend\n", __func__, model->main_gpu);
  9497. llama_free(ctx);
  9498. return nullptr;
  9499. }
  9500. ctx->backends.push_back(backend);
  9501. }
  9502. #elif defined(GGML_USE_KOMPUTE)
  9503. if (model->n_gpu_layers > 0) {
  9504. auto * backend = ggml_backend_kompute_init(model->main_gpu);
  9505. if (backend == nullptr) {
  9506. LLAMA_LOG_ERROR("%s: failed to initialize Kompute backend\n", __func__);
  9507. llama_free(ctx);
  9508. return nullptr;
  9509. }
  9510. ctx->backends.push_back(backend);
  9511. }
  9512. #endif
  9513. ctx->backend_cpu = ggml_backend_cpu_init();
  9514. if (ctx->backend_cpu == nullptr) {
  9515. LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__);
  9516. llama_free(ctx);
  9517. return nullptr;
  9518. }
  9519. ctx->backends.push_back(ctx->backend_cpu);
  9520. if (!llama_kv_cache_init(ctx->kv_self, ctx->model, type_k, type_v,
  9521. cparams.n_ctx, cparams.offload_kqv)) {
  9522. LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
  9523. llama_free(ctx);
  9524. return nullptr;
  9525. }
  9526. {
  9527. size_t memory_size_k = 0;
  9528. size_t memory_size_v = 0;
  9529. for (auto & k : ctx->kv_self.k_l) {
  9530. memory_size_k += ggml_nbytes(k);
  9531. }
  9532. for (auto & v : ctx->kv_self.v_l) {
  9533. memory_size_v += ggml_nbytes(v);
  9534. }
  9535. LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
  9536. (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
  9537. ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
  9538. ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
  9539. }
  9540. // resized during inference, reserve maximum
  9541. ctx->logits.reserve(hparams.n_vocab*cparams.n_batch);
  9542. if (params.embedding) {
  9543. ctx->embedding.resize(hparams.n_embd);
  9544. }
  9545. // graph inputs
  9546. {
  9547. ggml_init_params init_params = {
  9548. /* .mem_size */ ggml_tensor_overhead()*7,
  9549. /* .mem_buffer */ nullptr,
  9550. /* .no_alloc */ true,
  9551. };
  9552. ctx->ctx_input = ggml_init(init_params);
  9553. ctx->inp_tokens = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
  9554. ctx->inp_embd = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, hparams.n_embd, cparams.n_batch);
  9555. ctx->inp_pos = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
  9556. ctx->inp_KQ_mask = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_ctx, cparams.n_batch);
  9557. ctx->inp_K_shift = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_ctx);
  9558. ctx->inp_sum = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_batch, cparams.n_batch);
  9559. ggml_set_name(ctx->inp_tokens, "inp_tokens");
  9560. ggml_set_name(ctx->inp_embd, "inp_embd");
  9561. ggml_set_name(ctx->inp_pos, "inp_pos");
  9562. ggml_set_name(ctx->inp_KQ_mask, "inp_KQ_mask");
  9563. ggml_set_name(ctx->inp_K_shift, "inp_K_shift");
  9564. ggml_set_name(ctx->inp_sum, "inp_sum");
  9565. ctx->buf_input = ggml_backend_alloc_ctx_tensors_from_buft(ctx->ctx_input, llama_default_buffer_type_cpu(true));
  9566. LLAMA_LOG_INFO("%s: %10s input buffer size = %8.2f MiB\n", __func__,
  9567. ggml_backend_buffer_name(ctx->buf_input),
  9568. ggml_backend_buffer_get_size(ctx->buf_input) / 1024.0 / 1024.0);
  9569. }
  9570. // scheduler and compute buffers
  9571. {
  9572. // buffer types used for the compute buffer of each backend
  9573. std::vector<ggml_backend_buffer_type_t> backend_buft;
  9574. for (auto * backend : ctx->backends) {
  9575. if (ggml_backend_is_cpu(backend)) {
  9576. // use host buffers for the CPU backend compute buffer
  9577. backend_buft.push_back(llama_default_buffer_type_cpu(true));
  9578. } else {
  9579. backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
  9580. }
  9581. }
  9582. // buffer used to store the computation graph and the tensor meta data
  9583. ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead());
  9584. ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), LLAMA_MAX_NODES);
  9585. // build worst-case graph
  9586. int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch);
  9587. int n_past = cparams.n_ctx - n_tokens;
  9588. llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
  9589. ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);
  9590. // initialize scheduler with the worst-case graph
  9591. if (!ggml_backend_sched_reserve(ctx->sched, gf)) {
  9592. LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
  9593. llama_free(ctx);
  9594. return nullptr;
  9595. }
  9596. for (size_t i = 0; i < ctx->backends.size(); i++) {
  9597. ggml_backend_t backend = ctx->backends[i];
  9598. ggml_backend_buffer_type_t buft = backend_buft[i];
  9599. size_t size = ggml_backend_sched_get_buffer_size(ctx->sched, backend);
  9600. LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
  9601. ggml_backend_buft_name(buft),
  9602. size / 1024.0 / 1024.0);
  9603. }
  9604. // note: the number of splits during measure is higher than during inference due to the kv shift
  9605. int n_splits = ggml_backend_sched_get_n_splits(ctx->sched);
  9606. LLAMA_LOG_INFO("%s: graph splits (measure): %d\n", __func__, n_splits);
  9607. }
  9608. }
  9609. #ifdef GGML_USE_MPI
  9610. ctx->ctx_mpi = ggml_mpi_init();
  9611. if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
  9612. // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
  9613. // TODO: needs fix after #3228
  9614. GGML_ASSERT(false && "not implemented");
  9615. //const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
  9616. //while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
  9617. llama_backend_free();
  9618. exit(1);
  9619. }
  9620. #endif
  9621. return ctx;
  9622. }
  9623. void llama_free(struct llama_context * ctx) {
  9624. delete ctx;
  9625. }
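// Example (sketch): pairing context creation with llama_free(). A null return from
// llama_new_context_with_model() means backend, KV cache or compute buffer setup failed.
// The `model` pointer is assumed to come from llama_load_model_from_file().
#if 0
    llama_context * lctx = llama_new_context_with_model(model, llama_context_default_params());
    if (lctx == nullptr) {
        // initialization failed
    }
    // ... use the context ...
    llama_free(lctx);
    llama_free_model(model);
#endif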
  9626. const llama_model * llama_get_model(const struct llama_context * ctx) {
  9627. return &ctx->model;
  9628. }
  9629. uint32_t llama_n_ctx(const struct llama_context * ctx) {
  9630. return ctx->cparams.n_ctx;
  9631. }
  9632. uint32_t llama_n_batch(const struct llama_context * ctx) {
  9633. return ctx->cparams.n_batch;
  9634. }
  9635. enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
  9636. return model->vocab.type;
  9637. }
  9638. int32_t llama_n_vocab(const struct llama_model * model) {
  9639. return model->vocab.id_to_token.size();
  9640. }
  9641. int32_t llama_n_ctx_train(const struct llama_model * model) {
  9642. return model->hparams.n_ctx_train;
  9643. }
  9644. int32_t llama_n_embd(const struct llama_model * model) {
  9645. return model->hparams.n_embd;
  9646. }
  9647. float llama_rope_freq_scale_train(const struct llama_model * model) {
  9648. return model->hparams.rope_freq_scale_train;
  9649. }
  9650. int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
  9651. const auto & it = model->gguf_kv.find(key);
  9652. if (it == model->gguf_kv.end()) {
  9653. if (buf_size > 0) {
  9654. buf[0] = '\0';
  9655. }
  9656. return -1;
  9657. }
  9658. return snprintf(buf, buf_size, "%s", it->second.c_str());
  9659. }
  9660. int32_t llama_model_meta_count(const struct llama_model * model) {
  9661. return (int)model->gguf_kv.size();
  9662. }
  9663. int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
  9664. if (i < 0 || i >= (int)model->gguf_kv.size()) {
  9665. if (buf_size > 0) {
  9666. buf[0] = '\0';
  9667. }
  9668. return -1;
  9669. }
  9670. auto it = model->gguf_kv.begin();
  9671. std::advance(it, i);
  9672. return snprintf(buf, buf_size, "%s", it->first.c_str());
  9673. }
  9674. int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
  9675. if (i < 0 || i >= (int)model->gguf_kv.size()) {
  9676. if (buf_size > 0) {
  9677. buf[0] = '\0';
  9678. }
  9679. return -1;
  9680. }
  9681. auto it = model->gguf_kv.begin();
  9682. std::advance(it, i);
  9683. return snprintf(buf, buf_size, "%s", it->second.c_str());
  9684. }
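// Example (sketch): dumping all GGUF metadata of a loaded model with the accessors above.
// The fixed-size buffers are illustrative; oversized values are truncated by snprintf.
#if 0
    char key[256];
    char val[256];
    const int32_t n_kv = llama_model_meta_count(model);
    for (int32_t i = 0; i < n_kv; ++i) {
        llama_model_meta_key_by_index    (model, i, key, sizeof(key));
        llama_model_meta_val_str_by_index(model, i, val, sizeof(val));
        printf("%s = %s\n", key, val);
    }
#endif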
  9685. int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
  9686. return snprintf(buf, buf_size, "%s %s %s",
  9687. llama_model_arch_name(model->arch),
  9688. llama_model_type_name(model->type),
  9689. llama_model_ftype_name(model->ftype).c_str());
  9690. }
  9691. uint64_t llama_model_size(const struct llama_model * model) {
  9692. uint64_t size = 0;
  9693. for (const auto & it : model->tensors_by_name) {
  9694. size += ggml_nbytes(it.second);
  9695. }
  9696. return size;
  9697. }
  9698. uint64_t llama_model_n_params(const struct llama_model * model) {
  9699. uint64_t nparams = 0;
  9700. for (const auto & it : model->tensors_by_name) {
  9701. nparams += ggml_nelements(it.second);
  9702. }
  9703. return nparams;
  9704. }
  9705. struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
  9706. auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(),
  9707. [name](const std::pair<std::string, struct ggml_tensor *> & it) {
  9708. return it.first == name;
  9709. });
  9710. if (it == model->tensors_by_name.end()) {
  9711. return nullptr;
  9712. }
  9713. return it->second;
  9714. }
  9715. uint32_t llama_model_quantize(
  9716. const char * fname_inp,
  9717. const char * fname_out,
  9718. const llama_model_quantize_params * params) {
  9719. try {
  9720. llama_model_quantize_internal(fname_inp, fname_out, params);
  9721. return 0;
  9722. } catch (const std::exception & err) {
  9723. LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
  9724. return 1;
  9725. }
  9726. }
  9727. int32_t llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, float scale, const char * path_base_model, int32_t n_threads) {
  9728. try {
  9729. return llama_apply_lora_from_file_internal(ctx->model, path_lora, scale, path_base_model, n_threads);
  9730. } catch (const std::exception & err) {
  9731. LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
  9732. return 1;
  9733. }
  9734. }
  9735. int32_t llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, float scale, const char * path_base_model, int32_t n_threads) {
  9736. try {
  9737. return llama_apply_lora_from_file_internal(*model, path_lora, scale, path_base_model, n_threads);
  9738. } catch (const std::exception & err) {
  9739. LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
  9740. return 1;
  9741. }
  9742. }
  9743. struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_max_seq) {
  9744. struct llama_kv_cache_view result = {
  9745. /*.n_cells = */ 0,
  9746. /*.n_max_seq = */ n_max_seq,
  9747. /*.token_count = */ 0,
  9748. /*.used_cells = */ llama_get_kv_cache_used_cells(ctx),
  9749. /*.max_contiguous = */ 0,
  9750. /*.max_contiguous_idx = */ -1,
  9751. /*.cells = */ nullptr,
  9752. /*.cells_sequences = */ nullptr,
  9753. };
  9754. return result;
  9755. }
  9756. void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
  9757. if (view->cells != nullptr) {
  9758. free(view->cells);
  9759. view->cells = nullptr;
  9760. }
  9761. if (view->cells_sequences != nullptr) {
  9762. free(view->cells_sequences);
  9763. view->cells_sequences = nullptr;
  9764. }
  9765. }
  9766. void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
  9767. if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
  9768. view->n_cells = int32_t(ctx->kv_self.size);
  9769. void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
  9770. GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
  9771. view->cells = (struct llama_kv_cache_view_cell *)p;
  9772. p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_max_seq * view->n_cells);
  9773. GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
  9774. view->cells_sequences = (llama_seq_id *)p;
  9775. }
  9776. const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
  9777. llama_kv_cache_view_cell * c_curr = view->cells;
  9778. llama_seq_id * cs_curr = view->cells_sequences;
  9779. int32_t used_cells = 0;
  9780. int32_t token_count = 0;
  9781. int32_t curr_contig_idx = -1;
  9782. uint32_t max_contig = 0;
  9783. int32_t max_contig_idx = -1;
  9784. for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_max_seq) {
  9785. const size_t curr_size = kv_cells[i].seq_id.size();
  9786. token_count += curr_size;
  9787. c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;
  9788. if (curr_size > 0) {
  9789. if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
  9790. max_contig = i - curr_contig_idx;
  9791. max_contig_idx = curr_contig_idx;
  9792. }
  9793. curr_contig_idx = -1;
  9794. } else if (curr_contig_idx < 0) {
  9795. curr_contig_idx = i;
  9796. }
  9797. int seq_idx = 0;
  9798. for (const llama_seq_id it : kv_cells[i].seq_id) {
  9799. if (seq_idx >= view->n_max_seq) {
  9800. break;
  9801. }
  9802. cs_curr[seq_idx] = it;
  9803. seq_idx++;
  9804. }
  9805. if (seq_idx != 0) {
  9806. used_cells++;
  9807. }
  9808. for (; seq_idx < view->n_max_seq; seq_idx++) {
  9809. cs_curr[seq_idx] = -1;
  9810. }
  9811. }
  9812. if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
  9813. max_contig_idx = curr_contig_idx;
  9814. max_contig = kv_cells.size() - curr_contig_idx;
  9815. }
  9816. view->max_contiguous = max_contig;
  9817. view->max_contiguous_idx = max_contig_idx;
  9818. view->token_count = token_count;
  9819. view->used_cells = used_cells;
  9820. if (uint32_t(used_cells) != ctx->kv_self.used) {
  9821. LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
  9822. __func__, ctx->kv_self.used, used_cells);
  9823. }
  9824. }
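// Example (sketch): inspecting KV cache occupancy with the debug view above - init once,
// update after decoding, free when done. `lctx` is an assumed existing context.
#if 0
    llama_kv_cache_view view = llama_kv_cache_view_init(lctx, /*n_max_seq*/ 4);
    llama_kv_cache_view_update(lctx, &view);
    printf("kv cells: %d used, %d tokens, longest empty run: %d cells\n",
           view.used_cells, view.token_count, view.max_contiguous);
    llama_kv_cache_view_free(&view);
#endif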
  9825. int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) {
  9826. int result = 0;
  9827. for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
  9828. result += ctx->kv_self.cells[i].seq_id.size();
  9829. }
  9830. return result;
  9831. }
  9832. int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
  9833. return ctx->kv_self.used;
  9834. }
  9835. void llama_kv_cache_clear(struct llama_context * ctx) {
  9836. llama_kv_cache_clear(ctx->kv_self);
  9837. }
  9838. void llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
  9839. llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
  9840. }
  9841. void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
  9842. if (seq_id_src == seq_id_dst) {
  9843. return;
  9844. }
  9845. llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
  9846. }
  9847. void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
  9848. llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
  9849. }
  9850. void llama_kv_cache_seq_shift(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
  9851. if (delta == 0) {
  9852. return;
  9853. }
  9854. llama_kv_cache_seq_shift(ctx->kv_self, seq_id, p0, p1, delta);
  9855. }
  9856. void llama_kv_cache_seq_div(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
  9857. if (d == 1) {
  9858. return;
  9859. }
  9860. llama_kv_cache_seq_div(ctx->kv_self, seq_id, p0, p1, d);
  9861. }
// Returns the *maximum* size of the state
size_t llama_get_state_size(const struct llama_context * ctx) {
    // we don't know the size of the rng until we actually serialize it, so reserve more than
    // enough memory for its serialized state (for reference, std::mt19937(1337) serializes to 6701 bytes)
    const size_t s_rng_size = sizeof(size_t);
    const size_t s_rng = LLAMA_MAX_RNG_STATE;
  9868. const size_t s_logits_size = sizeof(size_t);
  9869. // assume worst case for logits although only currently set ones are serialized
  9870. const size_t s_logits = ctx->logits.capacity() * sizeof(float);
  9871. const size_t s_embedding_size = sizeof(size_t);
  9872. const size_t s_embedding = ctx->embedding.size() * sizeof(float);
  9873. const size_t s_kv_size = sizeof(size_t);
  9874. const size_t s_kv_ntok = sizeof(int);
  9875. const size_t s_kv = ctx->kv_self.total_size();
  9876. const size_t s_total = (
  9877. + s_rng_size
  9878. + s_rng
  9879. + s_logits_size
  9880. + s_logits
  9881. + s_embedding_size
  9882. + s_embedding
  9883. + s_kv_size
  9884. + s_kv_ntok
  9885. + s_kv
  9886. );
  9887. return s_total;
  9888. }
// llama_data_context
  9890. struct llama_data_context {
  9891. virtual void write(const void * src, size_t size) = 0;
  9892. virtual size_t get_size_written() = 0;
  9893. virtual ~llama_data_context() = default;
  9894. };
  9895. struct llama_data_buffer_context : llama_data_context {
  9896. uint8_t * ptr;
  9897. size_t size_written = 0;
  9898. llama_data_buffer_context(uint8_t * p) : ptr(p) {}
  9899. void write(const void * src, size_t size) override {
  9900. memcpy(ptr, src, size);
  9901. ptr += size;
  9902. size_written += size;
  9903. }
  9904. size_t get_size_written() override {
  9905. return size_written;
  9906. }
  9907. };
  9908. struct llama_data_file_context : llama_data_context {
  9909. llama_file * file;
  9910. size_t size_written = 0;
  9911. llama_data_file_context(llama_file * f) : file(f) {}
  9912. void write(const void * src, size_t size) override {
  9913. file->write_raw(src, size);
  9914. size_written += size;
  9915. }
  9916. size_t get_size_written() override {
  9917. return size_written;
  9918. }
  9919. };
/** copy state data into either a buffer or file depending on the passed in context
 *
 * file context:
 * llama_file file("/path", "wb");
 * llama_data_file_context data_ctx(&file);
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
 * buffer context:
 * std::vector<uint8_t> buf(max_size, 0);
 * llama_data_buffer_context data_ctx(buf.data());
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
 */
  9933. static void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
  9934. // copy rng
  9935. {
  9936. std::ostringstream rng_ss;
  9937. rng_ss << ctx->rng;
  9938. const std::string & rng_str = rng_ss.str();
  9939. const size_t rng_size = rng_str.size();
  9940. GGML_ASSERT(rng_size <= LLAMA_MAX_RNG_STATE);
  9941. data_ctx->write(&rng_size, sizeof(rng_size));
  9942. data_ctx->write(rng_str.data(), rng_size);
  9943. }
  9944. // copy logits
  9945. {
  9946. const size_t logits_size = ctx->logits.size();
  9947. data_ctx->write(&logits_size, sizeof(logits_size));
  9948. if (logits_size) {
  9949. data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
  9950. }
  9951. }
  9952. // copy embeddings
  9953. {
  9954. const size_t embedding_size = ctx->embedding.size();
  9955. data_ctx->write(&embedding_size, sizeof(embedding_size));
  9956. if (embedding_size) {
  9957. data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
  9958. }
  9959. }
  9960. // copy kv cache
  9961. {
  9962. const auto & kv_self = ctx->kv_self;
  9963. const auto & hparams = ctx->model.hparams;
  9964. const auto & cparams = ctx->cparams;
  9965. const auto n_layer = hparams.n_layer;
  9966. const auto n_embd_k_gqa = hparams.n_embd_k_gqa();
  9967. const auto n_embd_v_gqa = hparams.n_embd_v_gqa();
  9968. const auto n_ctx = cparams.n_ctx;
  9969. const size_t kv_buf_size = kv_self.total_size();
  9970. const uint32_t kv_head = kv_self.head;
  9971. const uint32_t kv_size = kv_self.size;
  9972. const uint32_t kv_used = kv_self.used;
  9973. data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
  9974. data_ctx->write(&kv_head, sizeof(kv_head));
  9975. data_ctx->write(&kv_size, sizeof(kv_size));
  9976. data_ctx->write(&kv_used, sizeof(kv_used));
  9977. if (kv_buf_size) {
  9978. const size_t elt_size = ggml_element_size(kv_self.k_l[0]);
  9979. std::vector<uint8_t> tmp_buf;
  9980. for (int il = 0; il < (int) n_layer; ++il) {
  9981. tmp_buf.resize(elt_size*n_embd_k_gqa*kv_head);
  9982. ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), 0, tmp_buf.size());
  9983. data_ctx->write(tmp_buf.data(), tmp_buf.size());
  9984. // v is not contiguous, copy row by row
  9985. tmp_buf.resize(elt_size*kv_head);
  9986. for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) {
  9987. ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), ir*elt_size*n_ctx, tmp_buf.size());
  9988. data_ctx->write(tmp_buf.data(), tmp_buf.size());
  9989. }
  9990. }
  9991. }
  9992. for (uint32_t i = 0; i < kv_size; ++i) {
  9993. const auto & cell = kv_self.cells[i];
  9994. const llama_pos pos = cell.pos;
  9995. const size_t seq_id_size = cell.seq_id.size();
  9996. data_ctx->write(&pos, sizeof(pos));
  9997. data_ctx->write(&seq_id_size, sizeof(seq_id_size));
  9998. for (auto seq_id : cell.seq_id) {
  9999. data_ctx->write(&seq_id, sizeof(seq_id));
  10000. }
  10001. }
  10002. }
  10003. }
  10004. size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
  10005. llama_data_buffer_context data_ctx(dst);
  10006. llama_copy_state_data_internal(ctx, &data_ctx);
  10007. return data_ctx.get_size_written();
  10008. }
  10009. // Sets the state reading from the specified source address
  10010. size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
  10011. uint8_t * inp = src;
  10012. // set rng
  10013. {
  10014. size_t rng_size;
  10015. memcpy(&rng_size, inp, sizeof(rng_size)); inp += sizeof(rng_size);
  10016. GGML_ASSERT(rng_size <= LLAMA_MAX_RNG_STATE);
  10017. std::string rng_str((char *)inp, rng_size); inp += rng_size;
  10018. std::istringstream rng_ss(rng_str);
  10019. rng_ss >> ctx->rng;
  10020. GGML_ASSERT(!rng_ss.fail());
  10021. }
  10022. // set logits
  10023. {
  10024. size_t logits_size;
  10025. memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);
  10026. GGML_ASSERT(ctx->logits.capacity() >= logits_size);
  10027. if (logits_size) {
  10028. ctx->logits.resize(logits_size);
  10029. memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
  10030. inp += logits_size * sizeof(float);
  10031. }
  10032. }
  10033. // set embeddings
  10034. {
  10035. size_t embedding_size;
  10036. memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);
  10037. GGML_ASSERT(ctx->embedding.capacity() == embedding_size);
  10038. if (embedding_size) {
  10039. memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
  10040. inp += embedding_size * sizeof(float);
  10041. }
  10042. }
  10043. // set kv cache
  10044. {
  10045. const auto & kv_self = ctx->kv_self;
  10046. const auto & hparams = ctx->model.hparams;
  10047. const auto & cparams = ctx->cparams;
  10048. const int n_layer = hparams.n_layer;
  10049. const int n_embd_k_gqa = hparams.n_embd_k_gqa();
  10050. const int n_embd_v_gqa = hparams.n_embd_v_gqa();
  10051. const int n_ctx = cparams.n_ctx;
  10052. size_t kv_buf_size;
  10053. uint32_t kv_head;
  10054. uint32_t kv_size;
  10055. uint32_t kv_used;
  10056. memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
  10057. memcpy(&kv_head, inp, sizeof(kv_head)); inp += sizeof(kv_head);
  10058. memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
  10059. memcpy(&kv_used, inp, sizeof(kv_used)); inp += sizeof(kv_used);
  10060. if (kv_buf_size) {
  10061. GGML_ASSERT(kv_self.total_size() == kv_buf_size);
  10062. const size_t elt_size = ggml_element_size(kv_self.k_l[0]);
  10063. for (int il = 0; il < (int) n_layer; ++il) {
  10064. size_t k_size = elt_size*n_embd_k_gqa*kv_head;
  10065. ggml_backend_tensor_set(kv_self.k_l[il], inp, 0, k_size);
  10066. inp += k_size;
  10067. // v is not contiguous, copy row by row
  10068. size_t v_row_size = elt_size*kv_head;
  10069. for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) {
  10070. ggml_backend_tensor_set(kv_self.v_l[il], inp, ir*elt_size*n_ctx, v_row_size);
  10071. inp += v_row_size;
  10072. }
  10073. }
  10074. }
  10075. ctx->kv_self.head = kv_head;
  10076. ctx->kv_self.size = kv_size;
  10077. ctx->kv_self.used = kv_used;
  10078. ctx->kv_self.cells.resize(kv_size);
  10079. for (uint32_t i = 0; i < kv_size; ++i) {
  10080. llama_pos pos;
  10081. size_t seq_id_size;
  10082. memcpy(&pos, inp, sizeof(pos)); inp += sizeof(pos);
  10083. memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);
  10084. ctx->kv_self.cells[i].pos = pos;
  10085. llama_seq_id seq_id;
  10086. for (size_t j = 0; j < seq_id_size; ++j) {
  10087. memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
  10088. ctx->kv_self.cells[i].seq_id.insert(seq_id);
  10089. }
  10090. }
  10091. }
  10092. const size_t nread = inp - src;
  10093. const size_t max_size = llama_get_state_size(ctx);
  10094. GGML_ASSERT(nread <= max_size);
  10095. return nread;
  10096. }
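// Example (sketch): a snapshot/restore round-trip using the buffer variants above.
// llama_get_state_size() is an upper bound; llama_copy_state_data() returns the number
// of bytes actually written. `lctx` is an assumed existing context.
#if 0
    std::vector<uint8_t> state(llama_get_state_size(lctx));
    const size_t n_written = llama_copy_state_data(lctx, state.data());
    // ... decode more tokens, then rewind the context to the snapshot ...
    const size_t n_read = llama_set_state_data(lctx, state.data());
    GGML_ASSERT(n_read <= n_written);
#endif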
  10097. static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  10098. llama_file file(path_session, "rb");
  10099. // sanity checks
  10100. {
  10101. const uint32_t magic = file.read_u32();
  10102. const uint32_t version = file.read_u32();
  10103. if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
  10104. LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
  10105. return false;
  10106. }
  10107. llama_hparams session_hparams;
  10108. file.read_raw(&session_hparams, sizeof(llama_hparams));
  10109. if (session_hparams != ctx->model.hparams) {
  10110. LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
  10111. return false;
  10112. }
  10113. }
  10114. // load the prompt
  10115. {
  10116. const uint32_t n_token_count = file.read_u32();
  10117. if (n_token_count > n_token_capacity) {
  10118. LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
  10119. return false;
  10120. }
  10121. file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
  10122. *n_token_count_out = n_token_count;
  10123. }
  10124. // restore the context state
  10125. {
  10126. const size_t n_state_size_cur = file.size - file.tell();
  10127. const size_t n_state_size_max = llama_get_state_size(ctx);
  10128. if (n_state_size_cur > n_state_size_max) {
  10129. LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
  10130. return false;
  10131. }
  10132. std::vector<uint8_t> state_data(n_state_size_max);
  10133. file.read_raw(state_data.data(), n_state_size_cur);
  10134. llama_set_state_data(ctx, state_data.data());
  10135. }
  10136. return true;
  10137. }
  10138. bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  10139. try {
  10140. return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
  10141. } catch (const std::exception & err) {
  10142. LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
  10143. return false;
  10144. }
  10145. }
  10146. bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
  10147. llama_file file(path_session, "wb");
  10148. file.write_u32(LLAMA_SESSION_MAGIC);
  10149. file.write_u32(LLAMA_SESSION_VERSION);
  10150. file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));
  10151. // save the prompt
  10152. file.write_u32((uint32_t) n_token_count);
  10153. file.write_raw(tokens, sizeof(llama_token) * n_token_count);
  10154. // save the context state using stream saving
  10155. llama_data_file_context data_ctx(&file);
  10156. llama_copy_state_data_internal(ctx, &data_ctx);
  10157. return true;
  10158. }
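// Example (sketch): persisting a prompt and the context state to a session file, then
// loading it back on a later run. The path and the `tokens` vector are placeholders.
#if 0
    // save
    llama_save_session_file(lctx, "session.bin", tokens.data(), tokens.size());

    // load
    std::vector<llama_token> loaded(llama_n_ctx(lctx));
    size_t n_loaded = 0;
    if (llama_load_session_file(lctx, "session.bin", loaded.data(), loaded.size(), &n_loaded)) {
        loaded.resize(n_loaded);
    }
#endif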
  10159. int llama_eval(
  10160. struct llama_context * ctx,
  10161. llama_token * tokens,
  10162. int32_t n_tokens,
  10163. int32_t n_past) {
  10164. llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);
  10165. const int ret = llama_decode_internal(*ctx, llama_batch_get_one(tokens, n_tokens, n_past, 0));
  10166. if (ret < 0) {
  10167. LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
  10168. }
  10169. return ret;
  10170. }
  10171. int llama_eval_embd(
  10172. struct llama_context * ctx,
  10173. float * embd,
  10174. int32_t n_tokens,
  10175. int32_t n_past) {
  10176. llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);
  10177. llama_batch batch = { n_tokens, nullptr, embd, nullptr, nullptr, nullptr, nullptr, n_past, 1, 0, };
  10178. const int ret = llama_decode_internal(*ctx, batch);
  10179. if (ret < 0) {
  10180. LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
  10181. }
  10182. return ret;
  10183. }
  10184. void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch) {
  10185. ctx->cparams.n_threads = n_threads;
  10186. ctx->cparams.n_threads_batch = n_threads_batch;
  10187. }
  10188. struct llama_batch llama_batch_get_one(
  10189. llama_token * tokens,
  10190. int32_t n_tokens,
  10191. llama_pos pos_0,
  10192. llama_seq_id seq_id) {
  10193. return {
  10194. /*n_tokens =*/ n_tokens,
  10195. /*tokens =*/ tokens,
  10196. /*embd =*/ nullptr,
  10197. /*pos =*/ nullptr,
  10198. /*n_seq_id =*/ nullptr,
  10199. /*seq_id =*/ nullptr,
  10200. /*logits =*/ nullptr,
  10201. /*all_pos_0 =*/ pos_0,
  10202. /*all_pos_1 =*/ 1,
  10203. /*all_seq_id =*/ seq_id,
  10204. };
  10205. }
  10206. struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
  10207. llama_batch batch = { 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, 0, 0, };
  10208. if (embd) {
  10209. batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
  10210. } else {
  10211. batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
  10212. }
  10213. batch.pos = (llama_pos *) malloc(sizeof(llama_pos) * n_tokens_alloc);
  10214. batch.n_seq_id = (int32_t *) malloc(sizeof(int32_t) * n_tokens_alloc);
  10215. batch.seq_id = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
  10216. for (int i = 0; i < n_tokens_alloc; ++i) {
  10217. batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
  10218. }
  10219. batch.seq_id[n_tokens_alloc] = nullptr;
  10220. batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens_alloc);
  10221. return batch;
  10222. }
  10223. void llama_batch_free(struct llama_batch batch) {
  10224. if (batch.token) free(batch.token);
  10225. if (batch.embd) free(batch.embd);
  10226. if (batch.pos) free(batch.pos);
  10227. if (batch.n_seq_id) free(batch.n_seq_id);
  10228. if (batch.seq_id) {
  10229. for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
  10230. free(batch.seq_id[i]);
  10231. }
  10232. free(batch.seq_id);
  10233. }
  10234. if (batch.logits) free(batch.logits);
  10235. }
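// Example (sketch): filling a batch allocated by llama_batch_init() by hand - a single
// sequence with logits requested only for the last token - then decoding and freeing it.
// `lctx` and the `tokens` vector are assumed to exist.
#if 0
    llama_batch batch = llama_batch_init(/*n_tokens_alloc*/ 512, /*embd*/ 0, /*n_seq_max*/ 1);
    batch.n_tokens = (int32_t) tokens.size();
    for (int32_t i = 0; i < batch.n_tokens; ++i) {
        batch.token   [i]    = tokens[i];
        batch.pos     [i]    = i;
        batch.n_seq_id[i]    = 1;
        batch.seq_id  [i][0] = 0;
        batch.logits  [i]    = (i == batch.n_tokens - 1);
    }
    llama_decode(lctx, batch);
    llama_batch_free(batch);
#endif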
  10236. int32_t llama_decode(
  10237. struct llama_context * ctx,
  10238. struct llama_batch batch) {
  10239. const int ret = llama_decode_internal(*ctx, batch);
  10240. if (ret < 0) {
  10241. LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
  10242. }
  10243. return ret;
  10244. }
  10245. float * llama_get_logits(struct llama_context * ctx) {
  10246. return ctx->logits.data();
  10247. }
  10248. float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
  10249. assert(ctx->logits_valid.at(i));
  10250. return ctx->logits.data() + i*ctx->model.hparams.n_vocab;
  10251. }
  10252. float * llama_get_embeddings(struct llama_context * ctx) {
  10253. return ctx->embedding.data();
  10254. }
  10255. float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
  10256. return ctx->embedding.data() + i*ctx->model.hparams.n_embd;
  10257. }
  10258. const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
  10259. return model->vocab.id_to_token[token].text.c_str();
  10260. }
  10261. float llama_token_get_score(const struct llama_model * model, llama_token token) {
  10262. return model->vocab.id_to_token[token].score;
  10263. }
  10264. llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token) {
  10265. return model->vocab.id_to_token[token].type;
  10266. }
  10267. llama_token llama_token_bos(const struct llama_model * model) {
  10268. return model->vocab.special_bos_id;
  10269. }
  10270. llama_token llama_token_eos(const struct llama_model * model) {
  10271. return model->vocab.special_eos_id;
  10272. }
  10273. llama_token llama_token_nl(const struct llama_model * model) {
  10274. return model->vocab.linefeed_id;
  10275. }
  10276. int32_t llama_add_bos_token(const struct llama_model * model) {
  10277. return model->vocab.special_add_bos;
  10278. }
  10279. int32_t llama_add_eos_token(const struct llama_model * model) {
  10280. return model->vocab.special_add_eos;
  10281. }
  10282. llama_token llama_token_prefix(const struct llama_model * model) {
  10283. return model->vocab.special_prefix_id;
  10284. }
  10285. llama_token llama_token_middle(const struct llama_model * model) {
  10286. return model->vocab.special_middle_id;
  10287. }
  10288. llama_token llama_token_suffix(const struct llama_model * model) {
  10289. return model->vocab.special_suffix_id;
  10290. }
  10291. llama_token llama_token_eot(const struct llama_model * model) {
  10292. return model->vocab.special_eot_id;
  10293. }
  10294. int32_t llama_tokenize(
  10295. const struct llama_model * model,
  10296. const char * text,
  10297. int32_t text_len,
  10298. llama_token * tokens,
  10299. int32_t n_max_tokens,
  10300. bool add_bos,
  10301. bool special) {
  10302. auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_bos, special);
  10303. if (n_max_tokens < (int) res.size()) {
  10304. // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
  10305. return -((int) res.size());
  10306. }
  10307. for (size_t i = 0; i < res.size(); i++) {
  10308. tokens[i] = res[i];
  10309. }
  10310. return res.size();
  10311. }
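// Example (sketch): the usual two-pass pattern around the negative return value above -
// try with a small buffer, resize to the reported size on failure, then tokenize again.
// The input text and initial buffer size are illustrative.
#if 0
    const char * text = "Hello world";
    std::vector<llama_token> toks(8);
    int32_t n = llama_tokenize(model, text, (int32_t) strlen(text),
                               toks.data(), (int32_t) toks.size(), /*add_bos*/ true, /*special*/ false);
    if (n < 0) {
        toks.resize(-n);
        n = llama_tokenize(model, text, (int32_t) strlen(text),
                           toks.data(), (int32_t) toks.size(), true, false);
    }
    toks.resize(n);
#endif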
  10312. static std::string llama_decode_text(const std::string & text) {
  10313. std::string decoded_text;
  10314. auto unicode_sequences = codepoints_from_utf8(text);
  10315. for (auto& unicode_sequence : unicode_sequences) {
  10316. decoded_text += unicode_to_bytes_bpe(codepoint_to_utf8(unicode_sequence));
  10317. }
  10318. return decoded_text;
  10319. }
  10320. // does not write null-terminator to buf
  10321. int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length) {
  10322. if (0 <= token && token < llama_n_vocab(model)) {
  10323. switch (llama_vocab_get_type(model->vocab)) {
  10324. case LLAMA_VOCAB_TYPE_WPM:
  10325. case LLAMA_VOCAB_TYPE_SPM: {
  10326. // NOTE: we accept all unsupported token types,
  10327. // suppressing them like CONTROL tokens.
  10328. if (llama_is_normal_token(model->vocab, token)) {
  10329. std::string result = model->vocab.id_to_token[token].text;
  10330. llama_unescape_whitespace(result);
  10331. if (length < (int) result.length()) {
  10332. return -(int) result.length();
  10333. }
  10334. memcpy(buf, result.c_str(), result.length());
  10335. return result.length();
  10336. } else if (llama_is_user_defined_token(model->vocab, token)) {
  10337. std::string result = model->vocab.id_to_token[token].text;
  10338. if (length < (int) result.length()) {
  10339. return -result.length();
  10340. }
  10341. memcpy(buf, result.c_str(), result.length());
  10342. return result.length();
  10343. } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
  10344. if (length < 3) {
  10345. return -3;
  10346. }
  10347. memcpy(buf, "\xe2\x96\x85", 3);
  10348. return 3;
  10349. } else if (llama_is_control_token(model->vocab, token)) {
  10350. ;
  10351. } else if (llama_is_byte_token(model->vocab, token)) {
  10352. if (length < 1) {
  10353. return -1;
  10354. }
  10355. buf[0] = llama_token_to_byte(model->vocab, token);
  10356. return 1;
  10357. }
  10358. break;
  10359. }
  10360. case LLAMA_VOCAB_TYPE_BPE: {
  10361. // NOTE: we accept all unsupported token types,
  10362. // suppressing them like CONTROL tokens.
  10363. if (llama_is_normal_token(model->vocab, token)) {
  10364. std::string result = model->vocab.id_to_token[token].text;
  10365. result = llama_decode_text(result);
  10366. if (length < (int) result.length()) {
  10367. return -(int) result.length();
  10368. }
  10369. memcpy(buf, result.c_str(), result.length());
  10370. return result.length();
  10371. } else if (llama_is_user_defined_token(model->vocab, token)) {
  10372. std::string result = model->vocab.id_to_token[token].text;
  10373. if (length < (int) result.length()) {
  10374. return -result.length();
  10375. }
  10376. memcpy(buf, result.c_str(), result.length());
  10377. return result.length();
  10378. } else if (llama_is_control_token(model->vocab, token)) {
  10379. ;
  10380. }
  10381. break;
  10382. }
  10383. default:
  10384. GGML_ASSERT(false);
  10385. }
  10386. }
  10387. return 0;
  10388. }
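// Example (sketch): converting a token id back to text with the negative-length
// convention above; no null terminator is written, so build the string explicitly.
// `model` and `tok` are assumed to exist.
#if 0
    std::vector<char> piece(8);
    int32_t n_chars = llama_token_to_piece(model, tok, piece.data(), (int32_t) piece.size());
    if (n_chars < 0) {
        piece.resize(-n_chars);
        n_chars = llama_token_to_piece(model, tok, piece.data(), (int32_t) piece.size());
    }
    const std::string text(piece.data(), n_chars);
#endif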
struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        // clamp the counts to at least 1 so the per-token averages below never divide by zero
        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}
void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    LLAMA_LOG_INFO("\n");
    LLAMA_LOG_INFO("%s:        load time = %10.2f ms\n", __func__, timings.t_load_ms);
    LLAMA_LOG_INFO("%s:      sample time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    LLAMA_LOG_INFO("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    LLAMA_LOG_INFO("%s:       total time = %10.2f ms / %5d tokens\n", __func__, (timings.t_end_ms - timings.t_start_ms), (timings.n_p_eval + timings.n_eval));
}
void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}
const char * llama_print_system_info(void) {
    // note: the returned pointer refers to this static buffer, which is rebuilt on every call
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX_VNNI = "    + std::to_string(ggml_cpu_has_avx_vnni())    + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
    s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";

    return s.c_str();
}
void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
    fprintf(stream, "\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "# Timings #\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "\n");
    fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
            1.0e-3 * ctx->t_eval_us / ctx->n_eval);
    fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
            1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
    fprintf(stream, "mst_sample: %.2f # ms / token during sampling\n",
            1.0e-3 * ctx->t_sample_us / ctx->n_sample);
    fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
    fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
    fprintf(stream, "n_sample: %d # number of sampled tokens\n", ctx->n_sample);
    fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
    fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
    fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
    fprintf(stream, "t_sample_us: %" PRId64 " # total microseconds spent sampling\n", ctx->t_sample_us);
    fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
            1.0e6 * ctx->n_eval / ctx->t_eval_us);
    fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
            1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
    fprintf(stream, "ts_sample: %.2f # tokens / second during sampling\n",
            1.0e6 * ctx->n_sample / ctx->t_sample_us);
}
// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
) {
    return ctx->model.tensors_by_name;
}
void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
#ifdef GGML_USE_METAL
    ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#endif
}
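
// Illustrative sketch (not part of the upstream file): a caller-side callback that only
// forwards error messages, installed with llama_log_set() above. The helper name is
// hypothetical; the callback signature follows ggml_log_callback from ggml.h.
static void llama_example_quiet_log_callback(ggml_log_level level, const char * text, void * user_data) {
    (void) user_data;
    if (level == GGML_LOG_LEVEL_ERROR) {
        fputs(text, stderr);
        fflush(stderr);
    }
}
// usage: llama_log_set(llama_example_quiet_log_callback, NULL);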
static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        // the message did not fit into the 128-byte stack buffer:
        // format again into a heap buffer of the exact size, using the copied va_list
        char * buffer2 = new char[len + 1];
        vsnprintf(buffer2, len + 1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}
static void llama_log_internal(ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}

static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}