llama.cpp

#define LLAMA_API_INTERNAL
#include "llama.h"
#include "unicode.h"
#include "ggml.h"
#include "ggml-alloc.h"
#ifdef GGML_USE_CUBLAS
#  include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#  include "ggml-opencl.h"
#endif
#ifdef GGML_USE_METAL
#  include "ggml-metal.h"
#endif
#ifdef GGML_USE_MPI
#  include "ggml-mpi.h"
#endif
#ifndef QK_K
#  ifdef GGML_QKK_64
#    define QK_K 64
#  else
#    define QK_K 256
#  endif
#endif
#ifdef __has_include
#if __has_include(<unistd.h>)
#include <unistd.h>
#if defined(_POSIX_MAPPED_FILES)
#include <sys/mman.h>
#endif
#if defined(_POSIX_MEMLOCK_RANGE)
#include <sys/resource.h>
#endif
#endif
#endif
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <io.h>
#include <stdio.h> // for _fseeki64
#endif
#include <algorithm>
#include <array>
#include <cassert>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <forward_list>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <queue>
#include <random>
#include <regex>
#include <set>
#include <sstream>
#include <thread>
#include <unordered_map>
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif
#define LLAMA_MAX_NODES 8192
//
// logging
//
LLAMA_ATTRIBUTE_FORMAT(2, 3)
static void llama_log_internal        (ggml_log_level level, const char * format, ...);
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);
#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
//
// helpers
//
static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
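// usage (illustrative): utf8_len() inspects only the high nibble of the lead byte,
// so utf8_len('a') == 1 and utf8_len((char) 0xE2) == 3 (start of a 3-byte UTF-8 sequence);
// continuation bytes (high nibble 0x8..0xB) also map to 1.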
static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    std::string result;
    for (size_t pos = 0; ; pos += search.length()) {
        auto new_pos = s.find(search, pos);
        if (new_pos == std::string::npos) {
            result += s.substr(pos, s.size() - pos);
            break;
        }
        result += s.substr(pos, new_pos - pos) + replace;
        pos = new_pos;
    }
    s = std::move(result);
}
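// usage (illustrative): replace_all(s, "\"", "\\\"") rewrites every occurrence of the search
// string in-place in a single left-to-right pass; gguf_kv_to_str() below uses it to escape quotes.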
static bool is_float_close(float a, float b, float abs_tol) {
    // Check for non-negative tolerance
    if (abs_tol < 0.0) {
        throw std::invalid_argument("Tolerance must be non-negative");
    }
    // Exact equality check
    if (a == b) {
        return true;
    }
    // Check for infinities
    if (std::isinf(a) || std::isinf(b)) {
        return false;
    }
    // Regular comparison using the provided absolute tolerance
    return std::fabs(b - a) <= abs_tol;
}
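// usage (illustrative): is_float_close(1.0f, 1.0f + 1e-6f, 1e-5f) == true, while any comparison
// involving an infinity returns false unless the two values are exactly equal.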
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif
static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}
LLAMA_ATTRIBUTE_FORMAT(1, 2)
static std::string format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}
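// usage (illustrative): format("n_ctx = %d", 4096) returns the formatted std::string; the first
// vsnprintf pass measures the required size, the second writes into the sized buffer.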
//
// gguf constants (sync with gguf.py)
//
enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_PERSIMMON,
    LLM_ARCH_REFACT,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_UNKNOWN,
};
static std::map<llm_arch, std::string> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA, "llama" },
    { LLM_ARCH_FALCON, "falcon" },
    { LLM_ARCH_GPT2, "gpt2" },
    { LLM_ARCH_GPTJ, "gptj" },
    { LLM_ARCH_GPTNEOX, "gptneox" },
    { LLM_ARCH_MPT, "mpt" },
    { LLM_ARCH_BAICHUAN, "baichuan" },
    { LLM_ARCH_STARCODER, "starcoder" },
    { LLM_ARCH_PERSIMMON, "persimmon" },
    { LLM_ARCH_REFACT, "refact" },
    { LLM_ARCH_BLOOM, "bloom" },
    { LLM_ARCH_STABLELM, "stablelm" },
};
enum llm_kv {
    LLM_KV_GENERAL_ARCHITECTURE,
    LLM_KV_GENERAL_QUANTIZATION_VERSION,
    LLM_KV_GENERAL_ALIGNMENT,
    LLM_KV_GENERAL_NAME,
    LLM_KV_GENERAL_AUTHOR,
    LLM_KV_GENERAL_URL,
    LLM_KV_GENERAL_DESCRIPTION,
    LLM_KV_GENERAL_LICENSE,
    LLM_KV_GENERAL_SOURCE_URL,
    LLM_KV_GENERAL_SOURCE_HF_REPO,
    LLM_KV_CONTEXT_LENGTH,
    LLM_KV_EMBEDDING_LENGTH,
    LLM_KV_BLOCK_COUNT,
    LLM_KV_FEED_FORWARD_LENGTH,
    LLM_KV_USE_PARALLEL_RESIDUAL,
    LLM_KV_TENSOR_DATA_LAYOUT,
    LLM_KV_ATTENTION_HEAD_COUNT,
    LLM_KV_ATTENTION_HEAD_COUNT_KV,
    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
    LLM_KV_ATTENTION_CLAMP_KQV,
    LLM_KV_ATTENTION_LAYERNORM_EPS,
    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
    LLM_KV_ROPE_DIMENSION_COUNT,
    LLM_KV_ROPE_FREQ_BASE,
    LLM_KV_ROPE_SCALE_LINEAR,
    LLM_KV_ROPE_SCALING_TYPE,
    LLM_KV_ROPE_SCALING_FACTOR,
    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
    LLM_KV_ROPE_SCALING_FINETUNED,
    LLM_KV_TOKENIZER_MODEL,
    LLM_KV_TOKENIZER_LIST,
    LLM_KV_TOKENIZER_TOKEN_TYPE,
    LLM_KV_TOKENIZER_SCORES,
    LLM_KV_TOKENIZER_MERGES,
    LLM_KV_TOKENIZER_BOS_ID,
    LLM_KV_TOKENIZER_EOS_ID,
    LLM_KV_TOKENIZER_UNK_ID,
    LLM_KV_TOKENIZER_SEP_ID,
    LLM_KV_TOKENIZER_PAD_ID,
    LLM_KV_TOKENIZER_ADD_BOS,
    LLM_KV_TOKENIZER_ADD_EOS,
    LLM_KV_TOKENIZER_HF_JSON,
    LLM_KV_TOKENIZER_RWKV,
};
static std::map<llm_kv, std::string> LLM_KV_NAMES = {
    { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
    { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
    { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
    { LLM_KV_GENERAL_NAME, "general.name" },
    { LLM_KV_GENERAL_AUTHOR, "general.author" },
    { LLM_KV_GENERAL_URL, "general.url" },
    { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
    { LLM_KV_GENERAL_LICENSE, "general.license" },
    { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
    { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },
    { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
    { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
    { LLM_KV_BLOCK_COUNT, "%s.block_count" },
    { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
    { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
    { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
    { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
    { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
    { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
    { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
    { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
    { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
    { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
    { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
    { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
    { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },
    { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
    { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
    { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
    { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
    { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
    { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
    { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
    { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
    { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
    { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
    { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
    { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
    { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
    { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
};
struct LLM_KV {
    LLM_KV(llm_arch arch) : arch(arch) {}
    llm_arch arch;
    std::string operator()(llm_kv kv) const {
        return ::format(LLM_KV_NAMES[kv].c_str(), LLM_ARCH_NAMES[arch].c_str());
    }
};
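// usage (illustrative): LLM_KV(LLM_ARCH_LLAMA)(LLM_KV_CONTEXT_LENGTH) substitutes the
// architecture name into the "%s" pattern above, yielding "llama.context_length".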
enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_TOKEN_EMBD_NORM,
    LLM_TENSOR_POS_EMBD,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_ROPE_FREQS,
    LLM_TENSOR_ATTN_Q,
    LLM_TENSOR_ATTN_K,
    LLM_TENSOR_ATTN_V,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_NORM_2,
    LLM_TENSOR_ATTN_ROT_EMBD,
    LLM_TENSOR_FFN_GATE,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
    LLM_TENSOR_FFN_NORM,
    LLM_TENSOR_ATTN_Q_NORM,
    LLM_TENSOR_ATTN_K_NORM,
};
static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
    {
        LLM_ARCH_LLAMA,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_BAICHUAN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_FALCON,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_GPT2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
    {
        LLM_ARCH_GPTJ,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
    {
        LLM_ARCH_GPTNEOX,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PERSIMMON,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
        },
    },
    {
        LLM_ARCH_MPT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_STARCODER,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_REFACT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_BLOOM,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_STABLELM,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_UNKNOWN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
};
static llm_arch llm_arch_from_string(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
        if (kv.second == name) {
            return kv.first;
        }
    }
    return LLM_ARCH_UNKNOWN;
}
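// usage (illustrative): llm_arch_from_string("falcon") == LLM_ARCH_FALCON; names that do not
// appear in LLM_ARCH_NAMES fall back to LLM_ARCH_UNKNOWN.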
// helper to handle gguf constants
// usage:
//
//   const auto tn = LLM_TN(LLM_ARCH_LLAMA);
//
//   std::string name = tn(LLM_TENSOR_OUTPUT);                 -> "output"
//   std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias");     -> "token_embd.bias"
//   std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
//
struct LLM_TN {
    LLM_TN(llm_arch arch) : arch(arch) {}
    llm_arch arch;
    std::string operator()(llm_tensor tensor) const {
        return LLM_TENSOR_NAMES[arch].at(tensor);
    }
    std::string operator()(llm_tensor tensor, const std::string & suffix) const {
        return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
    }
    std::string operator()(llm_tensor tensor, int bid) const {
        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
    }
    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
    }
};
  520. //
  521. // gguf helpers
  522. //
  523. #define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
  524. do { \
  525. const std::string skey(key); \
  526. const int kid = gguf_find_key(ctx, skey.c_str()); \
  527. if (kid >= 0) { \
  528. enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \
  529. if (ktype != (type)) { \
  530. throw std::runtime_error(format("key %s has wrong type: %s", skey.c_str(), gguf_type_name(ktype))); \
  531. } \
  532. (dst) = func(ctx, kid); \
  533. } else if (req) { \
  534. throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
  535. } \
  536. } while (0)
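// Usage sketch (illustrative only; the key names are shown as they would be produced
// by kv(...) for LLM_ARCH_LLAMA and are not taken from this file):
//
//   uint32_t n_head_kv = hparams.n_head; // default kept when the optional key is absent
//   GGUF_GET_KEY(ctx, n_head_kv,       gguf_get_val_u32, GGUF_TYPE_UINT32, false, "llama.attention.head_count_kv");
//   GGUF_GET_KEY(ctx, hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true,  "llama.block_count");
//
// A key that exists with the wrong type throws regardless of the `req` flag; a missing
// key throws only when `req` is true.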
  537. static std::map<int8_t, std::string> LLAMA_ROPE_SCALING_TYPES = {
  538. { LLAMA_ROPE_SCALING_NONE, "none" },
  539. { LLAMA_ROPE_SCALING_LINEAR, "linear" },
  540. { LLAMA_ROPE_SCALING_YARN, "yarn" },
  541. };
  542. static int8_t llama_rope_scaling_type_from_string(const std::string & name) {
  543. for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
  544. if (kv.second == name) {
  545. return kv.first;
  546. }
  547. }
  548. return LLAMA_ROPE_SCALING_UNSPECIFIED;
  549. }
  550. static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
  551. switch (type) {
  552. case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
  553. case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
  554. case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
  555. case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
  556. case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
  557. case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
  558. case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
  559. case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
  560. case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
  561. case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
  562. case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
  563. default: return format("unknown type %d", type);
  564. }
  565. }
  566. static std::string gguf_kv_to_str(struct gguf_context * ctx_gguf, int i) {
  567. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  568. switch (type) {
  569. case GGUF_TYPE_STRING:
  570. return gguf_get_val_str(ctx_gguf, i);
  571. case GGUF_TYPE_ARRAY:
  572. {
  573. const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
  574. int arr_n = gguf_get_arr_n(ctx_gguf, i);
  575. const void * data = gguf_get_arr_data(ctx_gguf, i);
  576. std::stringstream ss;
  577. ss << "[";
  578. for (int j = 0; j < arr_n; j++) {
  579. if (arr_type == GGUF_TYPE_STRING) {
  580. std::string val = gguf_get_arr_str(ctx_gguf, i, j);
  581. // escape quotes
  582. replace_all(val, "\\", "\\\\");
  583. replace_all(val, "\"", "\\\"");
  584. ss << '"' << val << '"';
  585. } else if (arr_type == GGUF_TYPE_ARRAY) {
  586. ss << "???";
  587. } else {
  588. ss << gguf_data_to_str(arr_type, data, j);
  589. }
  590. if (j < arr_n - 1) {
  591. ss << ", ";
  592. }
  593. }
  594. ss << "]";
  595. return ss.str();
  596. }
  597. default:
  598. return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
  599. }
  600. }
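// Example output (illustrative): a GGUF_TYPE_ARRAY of strings renders as
//   ["hello", "wo\"rld"]        (backslashes and quotes escaped)
// nested arrays are not expanded and print as "???"; scalar values fall through to
// gguf_data_to_str() above.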
  601. //
  602. // ggml helpers
  603. //
  604. static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
  605. struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
  606. if (plan.work_size > 0) {
  607. buf.resize(plan.work_size);
  608. plan.work_data = buf.data();
  609. }
  610. ggml_graph_compute(graph, &plan);
  611. }
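// The buffer is passed in by the caller and reused across evaluations, so the cplan
// work area is not re-allocated on every graph compute; call sites further down in
// this file look roughly like:
//   ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);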
  612. //
  613. // llama helpers
  614. //
  615. inline void * llama_host_malloc(size_t n) {
  616. #ifdef GGML_USE_CUBLAS
  617. if (ggml_cublas_loaded()) {
  618. return ggml_cuda_host_malloc(n);
  619. } else {
  620. return malloc(n);
  621. }
  622. #elif GGML_USE_METAL
  623. return ggml_metal_host_malloc(n);
  624. #elif GGML_USE_CPU_HBM
  625. return hbw_malloc(n);
  626. #else
  627. return malloc(n);
  628. #endif
  629. }
  630. inline void llama_host_free(void * ptr) {
  631. #ifdef GGML_USE_CUBLAS
  632. if (ggml_cublas_loaded()) {
  633. return ggml_cuda_host_free(ptr);
  634. } else {
  635. return free(ptr);
  636. }
  637. #elif GGML_USE_METAL
  638. return ggml_metal_host_free(ptr);
  639. #elif GGML_USE_CPU_HBM
  640. return hbw_free(ptr);
  641. #else
  642. return free(ptr);
  643. #endif
  644. }
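// Note: pointers obtained from llama_host_malloc() must be released with
// llama_host_free(); depending on the build the backing allocator is CUDA pinned
// memory, Metal host memory, HBM or plain malloc, and the pairs are not interchangeable.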
  645. #if defined(_WIN32)
  646. static std::string llama_format_win_err(DWORD err) {
  647. LPSTR buf;
  648. size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
  649. NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
  650. if (!size) {
  651. return "FormatMessageA failed";
  652. }
  653. std::string ret(buf, size);
  654. LocalFree(buf);
  655. return ret;
  656. }
  657. #endif
  658. struct llama_buffer {
  659. void * data = NULL;
  660. size_t size = 0;
661. // fall back to plain malloc / free
662. // (useful when the backend host allocator, e.g. CUDA pinned memory, fails to allocate)
  663. bool fallback = false;
  664. void resize(size_t n) {
  665. llama_host_free(data);
  666. data = llama_host_malloc(n);
  667. if (!data) {
  668. fallback = true;
  669. data = malloc(n);
  670. } else {
  671. fallback = false;
  672. }
  673. GGML_ASSERT(data);
  674. size = n;
  675. }
  676. ~llama_buffer() {
  677. if (data) {
  678. if (fallback) { // NOLINT
  679. free(data);
  680. } else {
  681. llama_host_free(data);
  682. }
  683. }
  684. data = NULL;
  685. }
  686. };
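// Usage sketch (illustrative): resize() always re-allocates and does NOT preserve the
// previous contents, so this is only suitable for buffers that are filled after resizing:
//   llama_buffer buf;
//   buf.resize(n_bytes);             // may fall back to plain malloc if pinned alloc fails
//   memcpy(buf.data, src, n_bytes);  // n_bytes / src are hypothetical caller data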
  687. struct llama_file {
  688. // use FILE * so we don't have to re-open the file to mmap
  689. FILE * fp;
  690. size_t size;
  691. llama_file(const char * fname, const char * mode) {
  692. fp = std::fopen(fname, mode);
  693. if (fp == NULL) {
  694. throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
  695. }
  696. seek(0, SEEK_END);
  697. size = tell();
  698. seek(0, SEEK_SET);
  699. }
  700. size_t tell() const {
  701. #ifdef _WIN32
  702. __int64 ret = _ftelli64(fp);
  703. #else
  704. long ret = std::ftell(fp);
  705. #endif
  706. GGML_ASSERT(ret != -1); // this really shouldn't fail
  707. return (size_t) ret;
  708. }
  709. void seek(size_t offset, int whence) const {
  710. #ifdef _WIN32
  711. int ret = _fseeki64(fp, (__int64) offset, whence);
  712. #else
  713. int ret = std::fseek(fp, (long) offset, whence);
  714. #endif
  715. GGML_ASSERT(ret == 0); // same
  716. }
  717. void read_raw(void * ptr, size_t len) const {
  718. if (len == 0) {
  719. return;
  720. }
  721. errno = 0;
  722. std::size_t ret = std::fread(ptr, len, 1, fp);
  723. if (ferror(fp)) {
  724. throw std::runtime_error(format("read error: %s", strerror(errno)));
  725. }
  726. if (ret != 1) {
  727. throw std::runtime_error(std::string("unexpectedly reached end of file"));
  728. }
  729. }
  730. uint32_t read_u32() const {
  731. uint32_t ret;
  732. read_raw(&ret, sizeof(ret));
  733. return ret;
  734. }
  735. void write_raw(const void * ptr, size_t len) const {
  736. if (len == 0) {
  737. return;
  738. }
  739. errno = 0;
  740. size_t ret = std::fwrite(ptr, len, 1, fp);
  741. if (ret != 1) {
  742. throw std::runtime_error(format("write error: %s", strerror(errno)));
  743. }
  744. }
  745. void write_u32(std::uint32_t val) const {
  746. write_raw(&val, sizeof(val));
  747. }
  748. ~llama_file() {
  749. if (fp) {
  750. std::fclose(fp);
  751. }
  752. }
  753. };
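// Usage sketch (illustrative; the file name is hypothetical):
//   llama_file f("model.gguf", "rb");
//   const uint32_t magic = f.read_u32();  // throws std::runtime_error on a short read
//   f.seek(0, SEEK_SET);                  // f.size was captured in the constructor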
  754. struct llama_mmap {
  755. void * addr;
  756. size_t size;
  757. llama_mmap(const llama_mmap &) = delete;
  758. #ifdef _POSIX_MAPPED_FILES
  759. static constexpr bool SUPPORTED = true;
  760. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
  761. size = file->size;
  762. int fd = fileno(file->fp);
  763. int flags = MAP_SHARED;
  764. // prefetch/readahead impairs performance on NUMA systems
  765. if (numa) { prefetch = 0; }
  766. #ifdef __linux__
  767. if (prefetch) { flags |= MAP_POPULATE; }
  768. #endif
  769. addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
  770. if (addr == MAP_FAILED) {
  771. throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
  772. }
  773. if (prefetch > 0) {
  774. // Advise the kernel to preload the mapped memory
  775. if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
  776. fprintf(stderr, "warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
  777. strerror(errno));
  778. }
  779. }
  780. if (numa) {
  781. // advise the kernel not to use readahead
  782. // (because the next page might not belong on the same node)
  783. if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
  784. fprintf(stderr, "warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
  785. strerror(errno));
  786. }
  787. }
  788. }
  789. ~llama_mmap() {
  790. munmap(addr, size);
  791. }
  792. #elif defined(_WIN32)
  793. static constexpr bool SUPPORTED = true;
  794. llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
  795. (void) numa;
  796. size = file->size;
  797. HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
  798. HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
  799. DWORD error = GetLastError();
  800. if (hMapping == NULL) {
  801. throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
  802. }
  803. addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
  804. error = GetLastError();
  805. CloseHandle(hMapping);
  806. if (addr == NULL) {
  807. throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
  808. }
  809. if (prefetch) {
  810. // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
  811. BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
  812. HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
  813. // may fail on pre-Windows 8 systems
  814. pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
  815. if (pPrefetchVirtualMemory) {
  816. // advise the kernel to preload the mapped memory
  817. WIN32_MEMORY_RANGE_ENTRY range;
  818. range.VirtualAddress = addr;
  819. range.NumberOfBytes = (SIZE_T)size;
  820. if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
  821. fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
  822. llama_format_win_err(GetLastError()).c_str());
  823. }
  824. }
  825. }
  826. }
  827. ~llama_mmap() {
  828. if (!UnmapViewOfFile(addr)) {
  829. fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
  830. llama_format_win_err(GetLastError()).c_str());
  831. }
  832. }
  833. #else
  834. static constexpr bool SUPPORTED = false;
  835. llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
  836. (void) file;
  837. (void) prefetch;
  838. (void) numa;
  839. throw std::runtime_error(std::string("mmap not supported"));
  840. }
  841. #endif
  842. };
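// Usage sketch (illustrative): the loader maps the model file read-only and points
// tensor data directly into the mapping, roughly:
//   llama_file file("model.gguf", "rb");
//   llama_mmap mapping(&file, /*prefetch =*/ file.size, ggml_is_numa());
//   uint8_t * base = (uint8_t *) mapping.addr;   // valid for mapping.size bytes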
  843. // Represents some region of memory being locked using mlock or VirtualLock;
  844. // will automatically unlock on destruction.
  845. struct llama_mlock {
  846. void * addr = NULL;
  847. size_t size = 0;
  848. bool failed_already = false;
  849. llama_mlock() {}
  850. llama_mlock(const llama_mlock &) = delete;
  851. ~llama_mlock() {
  852. if (size) {
  853. raw_unlock(addr, size);
  854. }
  855. }
  856. void init(void * ptr) {
  857. GGML_ASSERT(addr == NULL && size == 0); // NOLINT
  858. addr = ptr;
  859. }
  860. void grow_to(size_t target_size) {
  861. GGML_ASSERT(addr);
  862. if (failed_already) {
  863. return;
  864. }
  865. size_t granularity = lock_granularity();
  866. target_size = (target_size + granularity - 1) & ~(granularity - 1);
  867. if (target_size > size) {
  868. if (raw_lock((uint8_t *) addr + size, target_size - size)) {
  869. size = target_size;
  870. } else {
  871. failed_already = true;
  872. }
  873. }
  874. }
  875. #ifdef _POSIX_MEMLOCK_RANGE
  876. static constexpr bool SUPPORTED = true;
  877. static size_t lock_granularity() {
  878. return (size_t) sysconf(_SC_PAGESIZE);
  879. }
  880. #ifdef __APPLE__
  881. #define MLOCK_SUGGESTION \
  882. "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
  883. "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
  884. #else
  885. #define MLOCK_SUGGESTION \
  886. "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
  887. #endif
  888. bool raw_lock(const void * addr, size_t size) const {
  889. if (!mlock(addr, size)) {
  890. return true;
  891. }
  892. char* errmsg = std::strerror(errno);
  893. bool suggest = (errno == ENOMEM);
  894. // Check if the resource limit is fine after all
  895. struct rlimit lock_limit;
  896. if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
  897. suggest = false;
  898. }
  899. if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
  900. suggest = false;
  901. }
  902. fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
  903. size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
  904. return false;
  905. }
  906. #undef MLOCK_SUGGESTION
  907. static void raw_unlock(void * addr, size_t size) {
  908. if (munlock(addr, size)) {
  909. fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
  910. }
  911. }
  912. #elif defined(_WIN32)
  913. static constexpr bool SUPPORTED = true;
  914. static size_t lock_granularity() {
  915. SYSTEM_INFO si;
  916. GetSystemInfo(&si);
  917. return (size_t) si.dwPageSize;
  918. }
  919. bool raw_lock(void * ptr, size_t len) const {
  920. for (int tries = 1; ; tries++) {
  921. if (VirtualLock(ptr, len)) {
  922. return true;
  923. }
  924. if (tries == 2) {
  925. fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
  926. len, size, llama_format_win_err(GetLastError()).c_str());
  927. return false;
  928. }
  929. // It failed but this was only the first try; increase the working
  930. // set size and try again.
  931. SIZE_T min_ws_size, max_ws_size;
  932. if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
  933. fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
  934. llama_format_win_err(GetLastError()).c_str());
  935. return false;
  936. }
  937. // Per MSDN: "The maximum number of pages that a process can lock
  938. // is equal to the number of pages in its minimum working set minus
  939. // a small overhead."
  940. // Hopefully a megabyte is enough overhead:
  941. size_t increment = len + 1048576;
  942. // The minimum must be <= the maximum, so we need to increase both:
  943. min_ws_size += increment;
  944. max_ws_size += increment;
  945. if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
  946. fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
  947. llama_format_win_err(GetLastError()).c_str());
  948. return false;
  949. }
  950. }
  951. }
  952. static void raw_unlock(void * ptr, size_t len) {
  953. if (!VirtualUnlock(ptr, len)) {
  954. fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
  955. llama_format_win_err(GetLastError()).c_str());
  956. }
  957. }
  958. #else
  959. static constexpr bool SUPPORTED = false;
  960. static size_t lock_granularity() {
  961. return (size_t) 65536;
  962. }
  963. bool raw_lock(const void * addr, size_t len) const {
  964. fprintf(stderr, "warning: mlock not supported on this system\n");
  965. return false;
  966. }
  967. static void raw_unlock(const void * addr, size_t len) {}
  968. #endif
  969. };
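// Usage sketch (illustrative): lock an existing allocation incrementally as data is
// loaded into it; grow_to() rounds the target up to the lock granularity and gives up
// permanently after the first failure:
//   llama_mlock mlock;
//   mlock.init(buf.data);                // buf is a hypothetical llama_buffer
//   mlock.grow_to(bytes_loaded_so_far);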
  970. typedef void (*offload_func_t)(struct ggml_tensor * tensor);
  971. static void ggml_offload_nop(struct ggml_tensor * tensor) {
  972. (void) tensor;
  973. }
  974. static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
  975. std::vector<char> result(8, 0);
  976. const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
  977. if (n_tokens < 0) {
  978. result.resize(-n_tokens);
  979. int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
  980. GGML_ASSERT(check == -n_tokens);
  981. }
  982. else {
  983. result.resize(n_tokens);
  984. }
  985. return std::string(result.data(), result.size());
  986. }
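// When the 8-byte stack buffer is too small, the C API returns the negated required
// length; the wrapper above resizes and retries once, so callers always receive the
// complete piece.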
  987. //
  988. // globals
  989. //
  990. struct llama_state {
  991. // We save the log callback globally
  992. ggml_log_callback log_callback = llama_log_callback_default;
  993. void * log_callback_user_data = nullptr;
  994. };
  995. static llama_state g_state;
  996. // available llama models
  997. enum e_model {
  998. MODEL_UNKNOWN,
  999. MODEL_1B,
  1000. MODEL_3B,
  1001. MODEL_7B,
  1002. MODEL_8B,
  1003. MODEL_13B,
  1004. MODEL_15B,
  1005. MODEL_30B,
  1006. MODEL_34B,
  1007. MODEL_40B,
  1008. MODEL_65B,
  1009. MODEL_70B,
  1010. };
  1011. static const size_t kiB = 1024;
  1012. static const size_t MiB = 1024*kiB;
  1013. static const size_t GiB = 1024*MiB;
  1014. struct llama_hparams {
  1015. bool vocab_only;
  1016. uint32_t n_vocab;
  1017. uint32_t n_ctx_train; // context size the model was trained on
  1018. uint32_t n_embd;
  1019. uint32_t n_head;
  1020. uint32_t n_head_kv;
  1021. uint32_t n_layer;
  1022. uint32_t n_rot;
  1023. uint32_t n_ff;
  1024. float f_norm_eps;
  1025. float f_norm_rms_eps;
  1026. float rope_freq_base_train;
  1027. float rope_freq_scale_train;
  1028. uint32_t n_yarn_orig_ctx;
  1029. int8_t rope_scaling_type_train : 3;
  1030. bool rope_finetuned : 1;
  1031. float f_clamp_kqv;
  1032. float f_max_alibi_bias;
  1033. bool operator!=(const llama_hparams & other) const {
  1034. if (this->vocab_only != other.vocab_only) return true;
  1035. if (this->n_vocab != other.n_vocab) return true;
  1036. if (this->n_ctx_train != other.n_ctx_train) return true;
  1037. if (this->n_embd != other.n_embd) return true;
  1038. if (this->n_head != other.n_head) return true;
  1039. if (this->n_head_kv != other.n_head_kv) return true;
  1040. if (this->n_layer != other.n_layer) return true;
  1041. if (this->n_rot != other.n_rot) return true;
  1042. if (this->n_ff != other.n_ff) return true;
  1043. if (this->rope_finetuned != other.rope_finetuned) return true;
  1044. if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;
  1045. const float EPSILON = 1e-9;
  1046. if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true;
  1047. if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true;
  1048. if (!is_float_close(this->rope_freq_base_train, other.rope_freq_base_train, EPSILON)) return true;
  1049. if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;
  1050. return false;
  1051. }
  1052. uint32_t n_gqa() const {
  1053. return n_head/n_head_kv;
  1054. }
  1055. uint32_t n_embd_head() const {
  1056. return n_embd/n_head;
  1057. }
  1058. uint32_t n_embd_gqa() const {
  1059. return n_embd/n_gqa();
  1060. }
  1061. };
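// Worked example (illustrative, hypothetical values): with n_embd = 8192, n_head = 64
// and n_head_kv = 8, n_gqa() = 8, n_embd_head() = 128 and n_embd_gqa() = 1024, i.e.
// K and V each store 1024 elements of state per token per layer instead of the full 8192.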
  1062. struct llama_cparams {
  1063. uint32_t n_ctx; // context size used during inference
  1064. uint32_t n_batch;
  1065. uint32_t n_threads; // number of threads to use for generation
  1066. uint32_t n_threads_batch; // number of threads to use for batch processing
  1067. float rope_freq_base;
  1068. float rope_freq_scale;
  1069. uint32_t n_yarn_orig_ctx;
  1070. // These hyperparameters are not exposed in GGUF, because all
  1071. // existing YaRN models use the same values for them.
  1072. float yarn_ext_factor;
  1073. float yarn_attn_factor;
  1074. float yarn_beta_fast;
  1075. float yarn_beta_slow;
  1076. bool mul_mat_q;
  1077. };
  1078. struct llama_layer {
  1079. // normalization
  1080. struct ggml_tensor * attn_norm;
  1081. struct ggml_tensor * attn_norm_b;
  1082. struct ggml_tensor * attn_norm_2;
  1083. struct ggml_tensor * attn_norm_2_b;
  1084. struct ggml_tensor * attn_q_norm;
  1085. struct ggml_tensor * attn_q_norm_b;
  1086. struct ggml_tensor * attn_k_norm;
  1087. struct ggml_tensor * attn_k_norm_b;
  1088. // attention
  1089. struct ggml_tensor * wq;
  1090. struct ggml_tensor * wk;
  1091. struct ggml_tensor * wv;
  1092. struct ggml_tensor * wo;
  1093. struct ggml_tensor * wqkv;
  1094. // attention bias
  1095. struct ggml_tensor * bo;
  1096. struct ggml_tensor * bqkv;
  1097. // normalization
  1098. struct ggml_tensor * ffn_norm;
  1099. struct ggml_tensor * ffn_norm_b;
  1100. // ff
  1101. struct ggml_tensor * ffn_gate; // w1
  1102. struct ggml_tensor * ffn_down; // w2
  1103. struct ggml_tensor * ffn_up; // w3
  1104. // ff bias
  1105. struct ggml_tensor * ffn_down_b; // b2
  1106. struct ggml_tensor * ffn_up_b; // b3
  1107. };
  1108. struct llama_kv_cell {
  1109. llama_pos pos = -1;
  1110. llama_pos delta = 0;
  1111. std::set<llama_seq_id> seq_id;
  1112. bool has_seq_id(const llama_seq_id & id) const {
  1113. return seq_id.find(id) != seq_id.end();
  1114. }
  1115. };
  1116. // ring-buffer of cached KV data
  1117. struct llama_kv_cache {
  1118. bool has_shift = false;
  1119. // Note: The value of head isn't only used to optimize searching
  1120. // for a free KV slot. llama_decode_internal also uses it, so it
  1121. // cannot be freely changed after a slot has been allocated.
  1122. uint32_t head = 0;
  1123. uint32_t size = 0;
  1124. // computed before each graph build
  1125. uint32_t n = 0;
  1126. std::vector<llama_kv_cell> cells;
  1127. struct ggml_tensor * k = NULL;
  1128. struct ggml_tensor * v = NULL;
  1129. struct ggml_context * ctx = NULL;
  1130. llama_buffer buf;
  1131. ~llama_kv_cache() {
  1132. if (ctx) {
  1133. ggml_free(ctx);
  1134. }
  1135. #ifdef GGML_USE_CUBLAS
  1136. if (ggml_cublas_loaded()) {
  1137. ggml_cuda_free_data(k);
  1138. ggml_cuda_free_data(v);
  1139. }
  1140. #endif
  1141. }
  1142. };
  1143. struct llama_vocab {
  1144. using id = int32_t;
  1145. using token = std::string;
  1146. using ttype = llama_token_type;
  1147. struct token_data {
  1148. token text;
  1149. float score;
  1150. ttype type;
  1151. };
  1152. enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;
  1153. std::unordered_map<token, id> token_to_id;
  1154. std::vector<token_data> id_to_token;
  1155. std::unordered_map<token, id> special_tokens_cache;
  1156. std::map<std::pair<std::string, std::string>, int> bpe_ranks;
  1157. // default LLaMA special tokens
  1158. id special_bos_id = 1;
  1159. id special_eos_id = 2;
  1160. id special_unk_id = 0;
  1161. id special_sep_id = -1;
  1162. id special_pad_id = -1;
  1163. int special_add_bos = -1; // -1 unknown, 1 add, 0 don't add.
  1164. int special_add_eos = -1; // -1 unknown, 1 add, 0 don't add.
  1165. id linefeed_id = 13;
  1166. id special_prefix_id = 32007;
  1167. id special_middle_id = 32009;
  1168. id special_suffix_id = 32008;
  1169. id special_eot_id = 32010;
  1170. int find_bpe_rank(std::string token_left, std::string token_right) const {
  1171. GGML_ASSERT(token_left.find(" ") == std::string::npos);
  1172. GGML_ASSERT(token_left.find("\n") == std::string::npos);
  1173. GGML_ASSERT(token_right.find(" ") == std::string::npos);
  1174. GGML_ASSERT(token_right.find("\n") == std::string::npos);
  1175. auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
  1176. if (it == bpe_ranks.end()) {
  1177. return -1;
  1178. }
  1179. return it->second;
  1180. }
  1181. };
  1182. struct llama_model {
  1183. e_model type = MODEL_UNKNOWN;
  1184. llm_arch arch = LLM_ARCH_UNKNOWN;
  1185. llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
  1186. std::string name = "n/a";
  1187. llama_hparams hparams = {};
  1188. llama_vocab vocab;
  1189. struct ggml_tensor * tok_embd;
  1190. struct ggml_tensor * pos_embd;
  1191. struct ggml_tensor * tok_norm;
  1192. struct ggml_tensor * tok_norm_b;
  1193. struct ggml_tensor * output_norm;
  1194. struct ggml_tensor * output_norm_b;
  1195. struct ggml_tensor * output;
  1196. std::vector<llama_layer> layers;
  1197. int n_gpu_layers;
  1198. // gguf metadata
  1199. std::unordered_map<std::string, std::string> gguf_kv;
  1200. // context
  1201. struct ggml_context * ctx = NULL;
  1202. // the model memory buffer
  1203. llama_buffer buf;
  1204. // model memory mapped file
  1205. std::unique_ptr<llama_mmap> mapping;
  1206. // objects representing data potentially being locked in memory
  1207. llama_mlock mlock_buf;
  1208. llama_mlock mlock_mmap;
  1209. // for quantize-stats only
  1210. std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
  1211. int64_t t_load_us = 0;
  1212. int64_t t_start_us = 0;
  1213. ~llama_model() {
  1214. if (ctx) {
  1215. ggml_free(ctx);
  1216. }
  1217. #ifdef GGML_USE_CUBLAS
  1218. if (ggml_cublas_loaded()) {
  1219. for (size_t i = 0; i < tensors_by_name.size(); ++i) {
  1220. ggml_cuda_free_data(tensors_by_name[i].second);
  1221. }
  1222. ggml_cuda_free_scratch();
  1223. }
  1224. #endif
  1225. #if defined(GGML_USE_CLBLAST)
  1226. for (size_t i = 0; i < tensors_by_name.size(); ++i) {
  1227. ggml_cl_free_data(tensors_by_name[i].second);
  1228. }
  1229. #endif
  1230. }
  1231. };
  1232. struct llama_context {
  1233. llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
  1234. ~llama_context() {
  1235. #ifdef GGML_USE_METAL
  1236. if (ctx_metal) {
  1237. ggml_metal_free(ctx_metal);
  1238. }
  1239. #endif
  1240. if (alloc) {
  1241. ggml_allocr_free(alloc);
  1242. }
  1243. }
  1244. llama_cparams cparams;
  1245. const llama_model & model;
  1246. // key + value cache for the self attention
  1247. struct llama_kv_cache kv_self;
  1248. std::mt19937 rng;
  1249. bool has_evaluated_once = false;
  1250. int64_t t_start_us;
  1251. int64_t t_load_us;
  1252. int64_t t_sample_us = 0;
  1253. int64_t t_p_eval_us = 0;
  1254. int64_t t_eval_us = 0;
  1255. int32_t n_sample = 0; // number of tokens sampled
  1256. int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
  1257. int32_t n_eval = 0; // number of eval calls
  1258. // decode output (2-dimensional array: [n_tokens][n_vocab])
  1259. std::vector<float> logits;
  1260. bool logits_all = false;
  1261. // input embedding (1-dimensional array: [n_embd])
  1262. std::vector<float> embedding;
  1263. // reusable buffer for `struct ggml_graph_plan.work_data`
  1264. std::vector<uint8_t> work_buffer;
  1265. // memory buffers used to evaluate the model
  1266. llama_buffer buf_compute;
  1267. llama_buffer buf_alloc;
  1268. ggml_allocr * alloc = NULL;
  1269. #ifdef GGML_USE_METAL
  1270. ggml_metal_context * ctx_metal = NULL;
  1271. #endif
  1272. #ifdef GGML_USE_MPI
  1273. ggml_mpi_context * ctx_mpi = NULL;
  1274. #endif
  1275. };
  1276. //
  1277. // kv cache helpers
  1278. //
  1279. static bool llama_kv_cache_init(
  1280. const struct llama_hparams & hparams,
  1281. struct llama_kv_cache & cache,
  1282. ggml_type wtype,
  1283. uint32_t n_ctx,
  1284. int n_gpu_layers) {
  1285. const uint32_t n_embd = hparams.n_embd_gqa();
  1286. const uint32_t n_layer = hparams.n_layer;
  1287. const int64_t n_mem = n_layer*n_ctx;
  1288. const int64_t n_elements = n_embd*n_mem;
  1289. cache.has_shift = false;
  1290. cache.head = 0;
  1291. cache.size = n_ctx;
  1292. cache.cells.clear();
  1293. cache.cells.resize(n_ctx);
  1294. cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*ggml_tensor_overhead());
  1295. memset(cache.buf.data, 0, cache.buf.size);
  1296. struct ggml_init_params params;
  1297. params.mem_size = cache.buf.size;
  1298. params.mem_buffer = cache.buf.data;
  1299. params.no_alloc = false;
  1300. cache.ctx = ggml_init(params);
  1301. if (!cache.ctx) {
  1302. LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
  1303. return false;
  1304. }
  1305. cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
  1306. cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
  1307. ggml_set_name(cache.k, "cache_k");
  1308. ggml_set_name(cache.v, "cache_v");
  1309. (void) n_gpu_layers;
  1310. #ifdef GGML_USE_CUBLAS
  1311. if (ggml_cublas_loaded()) {
  1312. size_t vram_kv_cache = 0;
  1313. if (n_gpu_layers > (int)n_layer + 1) {
  1314. ggml_cuda_assign_buffers_no_scratch(cache.v);
  1315. LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
  1316. vram_kv_cache += ggml_nbytes(cache.v);
  1317. }
  1318. if (n_gpu_layers > (int)n_layer + 2) {
  1319. ggml_cuda_assign_buffers_no_scratch(cache.k);
  1320. LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
  1321. vram_kv_cache += ggml_nbytes(cache.k);
  1322. }
  1323. if (vram_kv_cache > 0) {
  1324. LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MiB\n", __func__, vram_kv_cache / 1024.0 / 1024.0);
  1325. }
  1326. }
  1327. #endif
  1328. return true;
  1329. }
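// Sizing example (illustrative): with n_layer = 32, n_ctx = 4096, n_embd_gqa() = 4096
// and wtype = GGML_TYPE_F16 (2 bytes per element), each of cache.k and cache.v holds
// 32 * 4096 * 4096 elements, so cache.buf comes to roughly 2 GiB plus two tensor headers.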
  1330. // find an empty slot of size "n_tokens" in the cache
  1331. // updates the cache head
  1332. // Note: On success, it's important that cache.head points
  1333. // to the first cell of the slot.
  1334. static bool llama_kv_cache_find_slot(
  1335. struct llama_kv_cache & cache,
  1336. const struct llama_batch & batch) {
  1337. const uint32_t n_ctx = cache.size;
  1338. const uint32_t n_tokens = batch.n_tokens;
  1339. if (n_tokens > n_ctx) {
  1340. LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
  1341. return false;
  1342. }
  1343. uint32_t n_tested = 0;
  1344. while (true) {
  1345. if (cache.head + n_tokens > n_ctx) {
  1346. n_tested += n_ctx - cache.head;
  1347. cache.head = 0;
  1348. continue;
  1349. }
  1350. bool found = true;
  1351. for (uint32_t i = 0; i < n_tokens; i++) {
  1352. if (cache.cells[cache.head + i].pos >= 0) {
  1353. found = false;
  1354. cache.head += i + 1;
  1355. n_tested += i + 1;
  1356. break;
  1357. }
  1358. }
  1359. if (found) {
  1360. break;
  1361. }
  1362. if (n_tested >= n_ctx) {
  1363. //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
  1364. return false;
  1365. }
  1366. }
  1367. for (uint32_t i = 0; i < n_tokens; i++) {
  1368. cache.cells[cache.head + i].pos = batch.pos[i];
  1369. for (int32_t j = 0; j < batch.n_seq_id[i]; j++) {
  1370. cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i][j]);
  1371. }
  1372. }
  1373. return true;
  1374. }
  1375. // find how many cells are currently in use
  1376. static int32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
  1377. for (uint32_t i = cache.size - 1; i > 0; --i) {
  1378. if (cache.cells[i].pos >= 0 && !cache.cells[i].seq_id.empty()) {
  1379. return i + 1;
  1380. }
  1381. }
  1382. return 0;
  1383. }
  1384. static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
  1385. for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
  1386. cache.cells[i].pos = -1;
  1387. cache.cells[i].seq_id.clear();
  1388. }
  1389. cache.head = 0;
  1390. }
  1391. static void llama_kv_cache_seq_rm(
  1392. struct llama_kv_cache & cache,
  1393. llama_seq_id seq_id,
  1394. llama_pos p0,
  1395. llama_pos p1) {
  1396. uint32_t new_head = cache.size;
  1397. if (p0 < 0) p0 = 0;
  1398. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1399. for (uint32_t i = 0; i < cache.size; ++i) {
  1400. if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1401. if (seq_id < 0) {
  1402. cache.cells[i].seq_id.clear();
  1403. } else if (cache.cells[i].has_seq_id(seq_id)) {
  1404. cache.cells[i].seq_id.erase(seq_id);
  1405. } else {
  1406. continue;
  1407. }
  1408. if (cache.cells[i].seq_id.empty()) {
  1409. cache.cells[i].pos = -1;
  1410. if (new_head == cache.size) new_head = i;
  1411. }
  1412. }
  1413. }
  1414. // If we freed up a slot, set head to it so searching can start there.
  1415. if (new_head != cache.size) cache.head = new_head;
  1416. }
  1417. static void llama_kv_cache_seq_cp(
  1418. struct llama_kv_cache & cache,
  1419. llama_seq_id seq_id_src,
  1420. llama_seq_id seq_id_dst,
  1421. llama_pos p0,
  1422. llama_pos p1) {
  1423. if (p0 < 0) p0 = 0;
  1424. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1425. cache.head = 0;
  1426. for (uint32_t i = 0; i < cache.size; ++i) {
  1427. if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1428. cache.cells[i].seq_id.insert(seq_id_dst);
  1429. }
  1430. }
  1431. }
  1432. static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
  1433. uint32_t new_head = cache.size;
  1434. for (uint32_t i = 0; i < cache.size; ++i) {
  1435. if (!cache.cells[i].has_seq_id(seq_id)) {
  1436. cache.cells[i].pos = -1;
  1437. cache.cells[i].seq_id.clear();
  1438. if (new_head == cache.size) new_head = i;
  1439. } else {
  1440. cache.cells[i].seq_id.clear();
  1441. cache.cells[i].seq_id.insert(seq_id);
  1442. }
  1443. }
  1444. // If we freed up a slot, set head to it so searching can start there.
  1445. if (new_head != cache.size) cache.head = new_head;
  1446. }
  1447. static void llama_kv_cache_seq_shift(
  1448. struct llama_kv_cache & cache,
  1449. llama_seq_id seq_id,
  1450. llama_pos p0,
  1451. llama_pos p1,
  1452. llama_pos delta) {
  1453. uint32_t new_head = cache.size;
  1454. if (p0 < 0) p0 = 0;
  1455. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1456. for (uint32_t i = 0; i < cache.size; ++i) {
  1457. if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1458. cache.has_shift = true;
  1459. cache.cells[i].pos += delta;
  1460. cache.cells[i].delta += delta;
  1461. if (cache.cells[i].pos < 0) {
  1462. cache.cells[i].pos = -1;
  1463. cache.cells[i].seq_id.clear();
  1464. if (new_head == cache.size) new_head = i;
  1465. }
  1466. }
  1467. }
  1468. // If we freed up a slot, set head to it so searching can start there.
  1469. // Otherwise we just start the next search from the beginning.
  1470. cache.head = new_head != cache.size ? new_head : 0;
  1471. }
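// Usage sketch (illustrative; n_keep / n_discard are hypothetical caller values):
// context shifting for sequence 0 typically pairs a removal with a shift, keeping the
// first n_keep cells and sliding the remaining ones back:
//   llama_kv_cache_seq_rm   (cache, 0, n_keep,             n_keep + n_discard);
//   llama_kv_cache_seq_shift(cache, 0, n_keep + n_discard, -1, -n_discard);
// The per-cell deltas recorded here are later applied to the K cache (via RoPE) when
// has_shift is set.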
  1472. //
  1473. // model loading and saving
  1474. //
  1475. enum llama_fver {
  1476. GGUF_FILE_VERSION_V1 = 1,
  1477. GGUF_FILE_VERSION_V2 = 2,
  1478. GGUF_FILE_VERSION_V3 = 3,
  1479. };
  1480. static const char * llama_file_version_name(llama_fver version) {
  1481. switch (version) {
1482. case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until Nov 2023)";
  1483. case GGUF_FILE_VERSION_V2: return "GGUF V2";
  1484. case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
  1485. }
  1486. return "unknown";
  1487. }
  1488. static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
  1489. char buf[256];
  1490. snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
  1491. for (size_t i = 1; i < ne.size(); i++) {
  1492. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
  1493. }
  1494. return buf;
  1495. }
  1496. static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
  1497. char buf[256];
  1498. snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
  1499. for (int i = 1; i < GGML_MAX_DIMS; i++) {
  1500. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
  1501. }
  1502. return buf;
  1503. }
  1504. struct llama_model_loader {
  1505. int n_kv = 0;
  1506. int n_tensors = 0;
  1507. int n_created = 0;
  1508. int64_t n_elements = 0;
  1509. size_t n_bytes = 0;
  1510. bool use_mmap = false;
  1511. llama_file file;
  1512. llama_ftype ftype;
  1513. llama_fver fver;
  1514. std::unique_ptr<llama_mmap> mapping;
  1515. struct gguf_context * ctx_gguf = NULL;
  1516. struct ggml_context * ctx_meta = NULL;
  1517. llama_model_loader(const std::string & fname, bool use_mmap) : file(fname.c_str(), "rb") {
  1518. struct gguf_init_params params = {
  1519. /*.no_alloc = */ true,
  1520. /*.ctx = */ &ctx_meta,
  1521. };
  1522. ctx_gguf = gguf_init_from_file(fname.c_str(), params);
  1523. if (!ctx_gguf) {
  1524. throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
  1525. }
  1526. n_kv = gguf_get_n_kv(ctx_gguf);
  1527. n_tensors = gguf_get_n_tensors(ctx_gguf);
  1528. fver = (enum llama_fver ) gguf_get_version(ctx_gguf);
  1529. for (int i = 0; i < n_tensors; i++) {
  1530. const char * name = gguf_get_tensor_name(ctx_gguf, i);
  1531. struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name);
  1532. n_elements += ggml_nelements(t);
  1533. n_bytes += ggml_nbytes(t);
  1534. }
  1535. LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
  1536. __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
  1537. // determine file type based on the number of tensors for each quantization and print meta data
  1538. // TODO: make optional
  1539. {
  1540. std::map<enum ggml_type, uint32_t> n_type;
  1541. uint32_t n_type_max = 0;
  1542. enum ggml_type type_max = GGML_TYPE_F32;
  1543. for (int i = 0; i < n_tensors; i++) {
  1544. const char * name = gguf_get_tensor_name(ctx_gguf, i);
  1545. struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, name);
  1546. n_type[meta->type]++;
  1547. if (n_type_max < n_type[meta->type]) {
  1548. n_type_max = n_type[meta->type];
  1549. type_max = meta->type;
  1550. }
  1551. LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str());
  1552. }
  1553. switch (type_max) {
  1554. case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
  1555. case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
  1556. case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
  1557. case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
  1558. case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
  1559. case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
  1560. case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
  1561. case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
  1562. case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
  1563. case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
  1564. case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
  1565. case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
  1566. default:
  1567. {
  1568. LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
  1569. ftype = LLAMA_FTYPE_ALL_F32;
  1570. } break;
  1571. }
  1572. // this is a way to mark that we have "guessed" the file type
  1573. ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
  1574. {
  1575. const int kid = gguf_find_key(ctx_gguf, "general.file_type");
  1576. if (kid >= 0) {
  1577. ftype = (llama_ftype) gguf_get_val_u32(ctx_gguf, kid);
  1578. }
  1579. }
  1580. for (int i = 0; i < n_kv; i++) {
  1581. const char * name = gguf_get_key(ctx_gguf, i);
  1582. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  1583. const std::string type_name =
  1584. type == GGUF_TYPE_ARRAY
  1585. ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx_gguf, i)), gguf_get_arr_n(ctx_gguf, i))
  1586. : gguf_type_name(type);
  1587. std::string value = gguf_kv_to_str(ctx_gguf, i);
  1588. const size_t MAX_VALUE_LEN = 40;
  1589. if (value.size() > MAX_VALUE_LEN) {
  1590. value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
  1591. }
  1592. replace_all(value, "\n", "\\n");
  1593. LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
  1594. }
  1595. // print type counts
  1596. for (auto & kv : n_type) {
  1597. if (kv.second == 0) {
  1598. continue;
  1599. }
  1600. LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
  1601. }
  1602. }
  1603. if (!llama_mmap::SUPPORTED) {
  1604. LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
  1605. use_mmap = false;
  1606. }
  1607. this->use_mmap = use_mmap;
  1608. }
  1609. ~llama_model_loader() {
  1610. if (ctx_gguf) {
  1611. gguf_free(ctx_gguf);
  1612. }
  1613. if (ctx_meta) {
  1614. ggml_free(ctx_meta);
  1615. }
  1616. }
  1617. std::string get_arch_name() const {
  1618. const auto kv = LLM_KV(LLM_ARCH_UNKNOWN);
  1619. std::string arch_name;
  1620. GGUF_GET_KEY(ctx_gguf, arch_name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_ARCHITECTURE));
  1621. return arch_name;
  1622. }
  1623. enum llm_arch get_arch() const {
  1624. const std::string arch_name = get_arch_name();
  1625. return llm_arch_from_string(arch_name);
  1626. }
  1627. const char * get_tensor_name(int i) const {
  1628. return gguf_get_tensor_name(ctx_gguf, i);
  1629. }
  1630. struct ggml_tensor * get_tensor_meta(int i) const {
  1631. return ggml_get_tensor(ctx_meta, get_tensor_name(i));
  1632. }
  1633. void calc_sizes(size_t & ctx_size_p, size_t & mmapped_size_p) const {
  1634. ctx_size_p = 0;
  1635. mmapped_size_p = 0;
  1636. for (int i = 0; i < n_tensors; i++) {
  1637. struct ggml_tensor * meta = get_tensor_meta(i);
  1638. ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
  1639. (use_mmap ? mmapped_size_p : ctx_size_p) += ggml_nbytes_pad(meta);
  1640. }
  1641. }
  1642. struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend_type backend) {
  1643. if (backend != GGML_BACKEND_CPU) {
  1644. ggml_set_no_alloc(ctx, true);
  1645. }
  1646. struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta);
  1647. tensor->backend = backend; // TODO: ggml_set_backend
  1648. ggml_set_name(tensor, ggml_get_name(meta));
  1649. if (backend != GGML_BACKEND_CPU) {
  1650. ggml_set_no_alloc(ctx, use_mmap);
  1651. }
  1652. n_created++;
  1653. return tensor;
  1654. }
  1655. struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend) {
  1656. struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
  1657. if (cur == NULL) {
  1658. throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
  1659. }
  1660. if (backend == GGML_BACKEND_GPU_SPLIT) {
  1661. if (ne.size() == 1) {
  1662. throw std::runtime_error(format("%s: 1-dimensional tensor '%s' cannot be split on the GPU", __func__, name.c_str()));
  1663. }
  1664. }
  1665. {
  1666. bool is_ok = true;
  1667. for (size_t i = 0; i < ne.size(); ++i) {
  1668. if (ne[i] != cur->ne[i]) {
  1669. is_ok = false;
  1670. break;
  1671. }
  1672. }
  1673. if (!is_ok) {
  1674. throw std::runtime_error(
  1675. format("%s: tensor '%s' has wrong shape; expected %s, got %s",
  1676. __func__, name.c_str(),
  1677. llama_format_tensor_shape(ne).c_str(),
  1678. llama_format_tensor_shape(cur).c_str()));
  1679. }
  1680. }
  1681. return create_tensor_for(ctx, cur, backend);
  1682. }
  1683. void done_getting_tensors() const {
  1684. if (n_created != n_tensors) {
  1685. throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
  1686. }
  1687. }
  1688. size_t file_offset(const char * name) const {
  1689. const int idx = gguf_find_tensor(ctx_gguf, name);
  1690. if (idx < 0) {
  1691. throw std::runtime_error(format("%s: tensor '%s' not found in the file", __func__, name));
  1692. }
  1693. return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx);
  1694. }
  1695. void load_data_for(struct ggml_tensor * cur) const {
  1696. const size_t offs = file_offset(ggml_get_name(cur));
  1697. if (use_mmap) {
  1698. cur->data = (uint8_t *) mapping->addr + offs;
  1699. } else {
  1700. file.seek(offs, SEEK_SET);
  1701. file.read_raw(cur->data, ggml_nbytes(cur));
  1702. }
  1703. }
  1704. void load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
  1705. size_t size_data = 0;
  1706. size_t size_lock = 0;
  1707. size_t size_pref = 0; // prefetch
  1708. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  1709. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  1710. size_data += ggml_nbytes(cur);
  1711. if (cur->backend == GGML_BACKEND_CPU) {
  1712. size_pref += ggml_nbytes(cur);
  1713. }
  1714. }
  1715. if (use_mmap) {
  1716. mapping.reset(new llama_mmap(&file, size_pref, ggml_is_numa()));
  1717. if (lmlock) {
  1718. lmlock->init(mapping->addr);
  1719. }
  1720. }
  1721. size_t done_size = 0;
  1722. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  1723. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  1724. GGML_ASSERT(cur); // unused tensors should have been caught by load_data already
  1725. if (progress_callback) {
  1726. progress_callback((float) done_size / size_data, progress_callback_user_data);
  1727. }
  1728. // allocate temp buffer if not using mmap
  1729. if (!use_mmap && cur->data == NULL) {
  1730. GGML_ASSERT(cur->backend != GGML_BACKEND_CPU);
  1731. #ifdef GGML_USE_CPU_HBM
  1732. cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur));
  1733. #else
  1734. cur->data = (uint8_t*)malloc(ggml_nbytes(cur));
  1735. #endif
  1736. }
  1737. load_data_for(cur);
  1738. switch (cur->backend) {
  1739. case GGML_BACKEND_CPU:
  1740. if (use_mmap && lmlock) {
  1741. size_lock += ggml_nbytes(cur);
  1742. lmlock->grow_to(size_lock);
  1743. }
  1744. break;
  1745. #ifdef GGML_USE_CUBLAS
  1746. case GGML_BACKEND_GPU:
  1747. case GGML_BACKEND_GPU_SPLIT:
  1748. // old code:
  1749. //ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor);
  1750. // TODO: test if this works !!
  1751. ggml_cuda_transform_tensor(cur->data, cur);
  1752. if (!use_mmap) {
  1753. free(cur->data);
  1754. }
  1755. break;
  1756. #elif defined(GGML_USE_CLBLAST)
  1757. case GGML_BACKEND_GPU:
  1758. ggml_cl_transform_tensor(cur->data, cur);
  1759. if (!use_mmap) {
  1760. free(cur->data);
  1761. }
  1762. break;
  1763. #endif
  1764. default:
  1765. continue;
  1766. }
  1767. done_size += ggml_nbytes(cur);
  1768. }
  1769. }
  1770. };
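// Flow sketch (illustrative) of how the loader is driven by the functions below; the
// exact call sequence lives in the model-loading code further down in this file:
//   llama_model_loader ml(fname, /*use_mmap =*/ true);
//   llm_load_arch   (ml, model);
//   llm_load_hparams(ml, model);
//   llm_load_vocab  (ml, model);
//   // ... one ml.create_tensor(...) per weight, then:
//   ml.done_getting_tensors();
//   ml.load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);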
  1771. //
  1772. // load LLaMA models
  1773. //
  1774. static std::string llama_model_arch_name(llm_arch arch) {
  1775. auto it = LLM_ARCH_NAMES.find(arch);
  1776. if (it == LLM_ARCH_NAMES.end()) {
  1777. return "unknown";
  1778. }
  1779. return it->second;
  1780. }
  1781. static std::string llama_model_ftype_name(llama_ftype ftype) {
  1782. if (ftype & LLAMA_FTYPE_GUESSED) {
  1783. return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
  1784. }
  1785. switch (ftype) {
  1786. case LLAMA_FTYPE_ALL_F32: return "all F32";
  1787. case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16";
  1788. case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
  1789. case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
  1790. case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
  1791. return "mostly Q4_1, some F16";
  1792. case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0";
  1793. case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1";
  1794. case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0";
  1795. // K-quants
  1796. case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K";
  1797. case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small";
  1798. case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium";
  1799. case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large";
  1800. case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small";
  1801. case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium";
  1802. case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small";
  1803. case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium";
  1804. case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K";
  1805. default: return "unknown, may not work";
  1806. }
  1807. }
  1808. static const char * llama_model_type_name(e_model type) {
  1809. switch (type) {
  1810. case MODEL_1B: return "1B";
  1811. case MODEL_3B: return "3B";
  1812. case MODEL_7B: return "7B";
  1813. case MODEL_8B: return "8B";
  1814. case MODEL_13B: return "13B";
  1815. case MODEL_15B: return "15B";
  1816. case MODEL_30B: return "30B";
  1817. case MODEL_34B: return "34B";
  1818. case MODEL_40B: return "40B";
  1819. case MODEL_65B: return "65B";
  1820. case MODEL_70B: return "70B";
  1821. default: return "?B";
  1822. }
  1823. }
  1824. static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
  1825. model.arch = ml.get_arch();
  1826. if (model.arch == LLM_ARCH_UNKNOWN) {
  1827. throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
  1828. }
  1829. }
  1830. static void llm_load_hparams(
  1831. llama_model_loader & ml,
  1832. llama_model & model) {
  1833. struct gguf_context * ctx = ml.ctx_gguf;
  1834. const auto kv = LLM_KV(model.arch);
  1835. auto & hparams = model.hparams;
  1836. // get metadata as string
  1837. for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
  1838. enum gguf_type type = gguf_get_kv_type(ctx, i);
  1839. if (type == GGUF_TYPE_ARRAY) {
  1840. continue;
  1841. }
  1842. const char * name = gguf_get_key(ctx, i);
  1843. const std::string value = gguf_kv_to_str(ctx, i);
  1844. model.gguf_kv.emplace(name, value);
  1845. }
  1846. // get general kv
  1847. GGUF_GET_KEY(ctx, model.name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_NAME));
  1848. // get hparams kv
  1849. GGUF_GET_KEY(ctx, hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, kv(LLM_KV_TOKENIZER_LIST));
  1850. GGUF_GET_KEY(ctx, hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_CONTEXT_LENGTH));
  1851. GGUF_GET_KEY(ctx, hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_EMBEDDING_LENGTH));
  1852. GGUF_GET_KEY(ctx, hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_FEED_FORWARD_LENGTH));
  1853. GGUF_GET_KEY(ctx, hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_ATTENTION_HEAD_COUNT));
  1854. GGUF_GET_KEY(ctx, hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_BLOCK_COUNT));
  1855. // n_head_kv is optional, default to n_head
  1856. hparams.n_head_kv = hparams.n_head;
  1857. GGUF_GET_KEY(ctx, hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV));
  1858. hparams.rope_finetuned = false;
  1859. GGUF_GET_KEY(ctx, hparams.rope_finetuned, gguf_get_val_bool, GGUF_TYPE_BOOL, false,
  1860. kv(LLM_KV_ROPE_SCALING_FINETUNED));
  1861. hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
  1862. GGUF_GET_KEY(ctx, hparams.n_yarn_orig_ctx, gguf_get_val_u32, GGUF_TYPE_UINT32, false,
  1863. kv(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN));
  1864. // rope_freq_base (optional)
  1865. hparams.rope_freq_base_train = 10000.0f;
  1866. GGUF_GET_KEY(ctx, hparams.rope_freq_base_train, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE));
  1867. std::string rope_scaling("linear");
  1868. GGUF_GET_KEY(ctx, rope_scaling, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_ROPE_SCALING_TYPE));
  1869. hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
  1870. GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_UNSPECIFIED);
1871. // rope_freq_scale is optional (stored in GGUF as its inverse, the scaling factor)
  1872. float ropescale = 0.0f;
  1873. GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALING_FACTOR));
  1874. if (ropescale == 0.0f) { // try the old key name
  1875. GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR));
  1876. }
  1877. hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
  1878. // sanity check for n_rot (optional)
  1879. {
  1880. hparams.n_rot = hparams.n_embd / hparams.n_head;
  1881. GGUF_GET_KEY(ctx, hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ROPE_DIMENSION_COUNT));
  1882. if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
  1883. if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
  1884. throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
  1885. }
  1886. }
  1887. // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
  1888. // gpt-j n_rot = rotary_dim
  1889. }
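// Example (illustrative): a 7B-style config with n_embd = 4096 and n_head = 32 defaults
// to n_rot = 128; for LLAMA and FALCON the value stored in the GGUF file (if present)
// must match this, otherwise loading aborts.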
  1890. // arch-specific KVs
  1891. switch (model.arch) {
  1892. case LLM_ARCH_LLAMA:
  1893. {
  1894. GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
  1895. switch (hparams.n_layer) {
  1896. case 26: model.type = e_model::MODEL_3B; break;
  1897. case 32: model.type = e_model::MODEL_7B; break;
  1898. case 40: model.type = e_model::MODEL_13B; break;
  1899. case 48: model.type = e_model::MODEL_34B; break;
  1900. case 60: model.type = e_model::MODEL_30B; break;
  1901. case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
  1902. default: model.type = e_model::MODEL_UNKNOWN;
  1903. }
  1904. } break;
  1905. case LLM_ARCH_FALCON:
  1906. {
  1907. GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
  1908. switch (hparams.n_layer) {
  1909. case 32: model.type = e_model::MODEL_7B; break;
  1910. case 60: model.type = e_model::MODEL_40B; break;
  1911. default: model.type = e_model::MODEL_UNKNOWN;
  1912. }
  1913. } break;
  1914. case LLM_ARCH_BAICHUAN:
  1915. {
  1916. GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
  1917. switch (hparams.n_layer) {
  1918. case 32: model.type = e_model::MODEL_7B; break;
  1919. case 40: model.type = e_model::MODEL_13B; break;
  1920. default: model.type = e_model::MODEL_UNKNOWN;
  1921. }
  1922. } break;
  1923. case LLM_ARCH_STARCODER:
  1924. {
  1925. GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
  1926. switch (hparams.n_layer) {
  1927. case 24: model.type = e_model::MODEL_1B; break;
  1928. case 36: model.type = e_model::MODEL_3B; break;
  1929. case 42: model.type = e_model::MODEL_7B; break;
  1930. case 40: model.type = e_model::MODEL_15B; break;
  1931. default: model.type = e_model::MODEL_UNKNOWN;
  1932. }
  1933. } break;
  1934. case LLM_ARCH_PERSIMMON:
  1935. {
  1936. GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
  1937. switch (hparams.n_layer) {
  1938. case 36: model.type = e_model::MODEL_8B; break;
  1939. default: model.type = e_model::MODEL_UNKNOWN;
  1940. }
  1941. } break;
  1942. case LLM_ARCH_REFACT:
  1943. {
  1944. GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
  1945. switch (hparams.n_layer) {
  1946. case 32: model.type = e_model::MODEL_1B; break;
  1947. default: model.type = e_model::MODEL_UNKNOWN;
  1948. }
  1949. } break;
  1950. case LLM_ARCH_BLOOM:
  1951. {
  1952. GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
  1953. switch (hparams.n_layer) {
  1954. case 24: model.type = e_model::MODEL_1B; break;
  1955. case 30:
  1956. switch (hparams.n_embd) {
  1957. case 2560: model.type = e_model::MODEL_3B; break;
  1958. case 4096: model.type = e_model::MODEL_7B; break;
  1959. } break;
  1960. }
  1961. } break;
  1962. case LLM_ARCH_MPT:
  1963. {
  1964. hparams.f_clamp_kqv = 0.0f;
  1965. GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
  1966. GGUF_GET_KEY(ctx, hparams.f_clamp_kqv, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ATTENTION_CLAMP_KQV));
  1967. GGUF_GET_KEY(ctx, hparams.f_max_alibi_bias, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_MAX_ALIBI_BIAS));
  1968. switch (hparams.n_layer) {
  1969. case 32: model.type = e_model::MODEL_7B; break;
  1970. case 48: model.type = e_model::MODEL_30B; break;
  1971. default: model.type = e_model::MODEL_UNKNOWN;
  1972. }
  1973. } break;
  1974. case LLM_ARCH_STABLELM:
  1975. {
  1976. GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
  1977. switch (hparams.n_layer) {
  1978. case 32: model.type = e_model::MODEL_3B; break;
  1979. default: model.type = e_model::MODEL_UNKNOWN;
  1980. }
  1981. } break;
  1982. default: (void)0;
  1983. }
  1984. model.ftype = ml.ftype;
  1985. }
  1986. // TODO: This should probably be in llama.h
  1987. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special = false);
  1988. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);
  1989. static void llm_load_vocab(
  1990. llama_model_loader & ml,
  1991. llama_model & model) {
  1992. auto & vocab = model.vocab;
  1993. struct gguf_context * ctx = ml.ctx_gguf;
  1994. const auto kv = LLM_KV(model.arch);
  1995. const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
  1996. if (token_idx == -1) {
  1997. throw std::runtime_error("cannot find tokenizer vocab in model file\n");
  1998. }
  1999. const float * scores = nullptr;
  2000. const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
  2001. if (score_idx != -1) {
  2002. scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
  2003. }
  2004. const int * toktypes = nullptr;
  2005. const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
  2006. if (toktype_idx != -1) {
  2007. toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
  2008. }
  2009. // determine vocab type
  2010. {
  2011. std::string tokenizer_name;
  2012. GGUF_GET_KEY(ctx, tokenizer_name, gguf_get_val_str, GGUF_TYPE_STRING, true, kv(LLM_KV_TOKENIZER_MODEL));
  2013. if (tokenizer_name == "llama") {
  2014. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  2015. // default special tokens
  2016. vocab.special_bos_id = 1;
  2017. vocab.special_eos_id = 2;
  2018. vocab.special_unk_id = 0;
  2019. vocab.special_sep_id = -1;
  2020. vocab.special_pad_id = -1;
  2021. } else if (tokenizer_name == "gpt2") {
  2022. vocab.type = LLAMA_VOCAB_TYPE_BPE;
  2023. // read bpe merges and populate bpe ranks
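// (illustrative example, not taken from a real model file: a merges entry such as "t h"
//  is split at the first space into the pair ("t", "h") and stored below with its array
//  index as the rank, so earlier entries represent higher-priority merges)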
  2024. const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
  2025. if (merges_keyidx == -1) {
  2026. throw std::runtime_error("cannot find tokenizer merges in model file\n");
  2027. }
  2028. const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
  2029. for (int i = 0; i < n_merges; i++) {
  2030. const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
  2031. GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
  2032. std::string first;
  2033. std::string second;
  2034. const size_t pos = word.find(' ', 1);
  2035. if (pos != std::string::npos) {
  2036. first = word.substr(0, pos);
  2037. second = word.substr(pos + 1);
  2038. }
  2039. vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
  2040. }
  2041. // default special tokens
  2042. vocab.special_bos_id = 11;
  2043. vocab.special_eos_id = 11;
  2044. vocab.special_unk_id = -1;
  2045. vocab.special_sep_id = -1;
  2046. vocab.special_pad_id = -1;
  2047. } else {
  2048. LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
  2049. LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
  2050. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  2051. }
  2052. }
  2053. const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
  2054. vocab.id_to_token.resize(n_vocab);
  2055. for (uint32_t i = 0; i < n_vocab; i++) {
  2056. std::string word = gguf_get_arr_str(ctx, token_idx, i);
  2057. GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
  2058. vocab.token_to_id[word] = i;
  2059. auto & token_data = vocab.id_to_token[i];
  2060. token_data.text = std::move(word);
  2061. token_data.score = scores ? scores[i] : 0.0f;
  2062. token_data.type = toktypes ? (llama_token_type) toktypes[i] : LLAMA_TOKEN_TYPE_NORMAL;
  2063. }
  2064. GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
  2065. // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
  2066. if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
  2067. vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
  2068. } else {
  2069. const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
  2070. GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
  2071. vocab.linefeed_id = ids[0];
  2072. }
  2073. // special tokens
  2074. {
  2075. const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
  2076. { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
  2077. { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
  2078. { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
  2079. { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
  2080. { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
  2081. };
  2082. for (const auto & it : special_token_types) {
  2083. const std::string & key = kv(std::get<0>(it));
  2084. int32_t & id = std::get<1>(it), old_id = id;
  2085. GGUF_GET_KEY(ctx, id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, key);
  2086. // Must be >= -1 and < vocab size. Since the key is unsigned, -1
  2087. // can only come from the default value, so there's no point in
  2088. // validating that.
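// (illustrative example: a stored BOS id of 100000 in a model with a 32000-token vocab
//  fails the check below and the default id is kept)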
  2089. if (size_t(id + 1) > vocab.id_to_token.size()) {
  2090. LLAMA_LOG_WARN("%s: bad special token: '%s' = %d, using default id %d\n",
  2091. __func__, key.c_str(), id, old_id);
  2092. id = old_id;
  2093. }
  2094. }
  2095. // Handle add_bos_token and add_eos_token
  2096. std::string key = kv(LLM_KV_TOKENIZER_ADD_BOS);
  2097. int kid = gguf_find_key(ctx, key.c_str());
  2098. enum gguf_type ktype = kid < 0 ? GGUF_TYPE_COUNT : gguf_get_kv_type(ctx, kid);
  2099. vocab.special_add_bos = ktype == GGUF_TYPE_BOOL ? gguf_get_val_bool(ctx, kid) : -1;
  2100. if (ktype != GGUF_TYPE_BOOL && ktype != GGUF_TYPE_COUNT) {
  2101. LLAMA_LOG_WARN("%s: bad field type %d for '%s' - ignoring\n", __func__, ktype, key.c_str());
  2102. }
  2103. key = kv(LLM_KV_TOKENIZER_ADD_EOS);
  2104. kid = gguf_find_key(ctx, key.c_str());
  2105. ktype = kid < 0 ? GGUF_TYPE_COUNT : gguf_get_kv_type(ctx, kid);
  2106. vocab.special_add_eos = ktype == GGUF_TYPE_BOOL ? gguf_get_val_bool(ctx, kid) : -1;
  2107. if (ktype != GGUF_TYPE_BOOL && ktype != GGUF_TYPE_COUNT) {
  2108. LLAMA_LOG_WARN("%s: bad field type %d for '%s' - ignoring\n", __func__, ktype, key.c_str());
  2109. }
  2110. }
  2111. // build special tokens cache
  2112. {
2113. // TODO: It is unclear (to me) at this point whether special tokens are guaranteed to be of a deterministic type,
2114. // and whether they will always be correctly labeled in 'added_tokens.json' etc.
2115. // The assumption is that, since special tokens aren't meant to be exposed to the end user, they are designed
2116. // to be unmatchable by the tokenizer; therefore, tokens from the vocab which the tokenizer cannot match
2117. // are special tokens.
2118. // From testing, this appears to correlate 1:1 with special tokens.
2119. //
2120. // Counting special tokens and verifying in only one direction
2121. // is sufficient to detect a difference between the two sets.
2122. //
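// (illustrative example, not part of the original comment: a marker like "<|endoftext|>"
//  usually cannot be split into two existing vocab tokens, so the verification below puts it
//  in the special tokens cache, while an ordinary word like "hello" can be split, e.g. into
//  "he" + "llo", and is skipped)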
  2123. uint32_t special_tokens_count_by_type = 0;
  2124. uint32_t special_tokens_count_from_verification = 0;
  2125. bool special_tokens_definition_mismatch = false;
  2126. for (const auto & t : vocab.token_to_id) {
  2127. const auto & token = t.first;
  2128. const auto & id = t.second;
  2129. // Count all non-normal tokens in the vocab while iterating
  2130. if (vocab.id_to_token[id].type != LLAMA_TOKEN_TYPE_NORMAL) {
  2131. special_tokens_count_by_type++;
  2132. }
  2133. // Skip single character tokens
  2134. if (token.length() > 1) {
  2135. bool is_tokenizable = false;
2136. // Split the token's string representation in two at every possible position
2137. // and check whether both halves can be matched to valid tokens
  2138. for (unsigned i = 1; i < token.length();) {
  2139. const auto left = token.substr(0, i);
  2140. const auto right = token.substr(i);
2141. // check that we didn't split in the middle of a UTF-8 sequence
  2142. auto utf = utf8_len(left.at(left.length() - 1));
  2143. if (utf == 1) {
  2144. if (vocab.token_to_id.find(left) != vocab.token_to_id.end() &&
  2145. vocab.token_to_id.find(right) != vocab.token_to_id.end() ) {
  2146. is_tokenizable = true;
  2147. break;
  2148. }
  2149. i++;
  2150. } else {
2151. // skip over the rest of the multi-byte UTF-8 sequence
  2152. i += utf - 1;
  2153. }
  2154. }
  2155. if (!is_tokenizable) {
2156. // Some tokens are multi-byte, but their UTF-8 sequences decode to a text length of just 1 character;
2157. // it's faster to re-filter them here, since there are far fewer candidates left at this point.
2158. // Calculate the total UTF-8 character length of the token's string representation
  2159. size_t utf8_str_len = 0;
  2160. for (unsigned i = 0; i < token.length();) {
  2161. utf8_str_len++;
  2162. i += utf8_len(token.at(i));
  2163. }
  2164. // And skip the ones which are one character
  2165. if (utf8_str_len > 1) {
  2166. // At this point what we have left are special tokens only
  2167. vocab.special_tokens_cache[token] = id;
  2168. // Count manually found special tokens
  2169. special_tokens_count_from_verification++;
  2170. // If this manually found special token is not marked as such, flag a mismatch
  2171. if (vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL) {
  2172. special_tokens_definition_mismatch = true;
  2173. }
  2174. }
  2175. }
  2176. }
  2177. }
  2178. if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
  2179. LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
  2180. __func__,
  2181. special_tokens_count_from_verification, vocab.id_to_token.size(),
  2182. special_tokens_count_by_type, vocab.id_to_token.size()
  2183. );
  2184. } else {
  2185. LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
  2186. __func__,
  2187. special_tokens_count_from_verification, vocab.id_to_token.size()
  2188. );
  2189. }
  2190. }
  2191. }
  2192. static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
  2193. const auto & hparams = model.hparams;
  2194. const auto & vocab = model.vocab;
  2195. const auto rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
  2196. // hparams
  2197. LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
  2198. LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch).c_str());
  2199. LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix
  2200. LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
  2201. LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
  2202. LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
  2203. LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
  2204. LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
  2205. LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
  2206. LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
  2207. LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
  2208. LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
  2209. LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
  2210. LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
  2211. LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
  2212. LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
  2213. LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
  2214. LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type.c_str());
  2215. LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
  2216. LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
  2217. LLAMA_LOG_INFO("%s: n_yarn_orig_ctx = %u\n", __func__, hparams.n_yarn_orig_ctx);
  2218. LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
  2219. LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
  2220. LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
  2221. LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
  2222. if (ml.n_bytes < GiB) {
  2223. LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  2224. } else {
  2225. LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  2226. }
  2227. // general kv
  2228. LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
  2229. // special tokens
  2230. if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
  2231. if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
  2232. if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
  2233. if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
  2234. if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
  2235. if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
  2236. }
  2237. static void llm_load_tensors(
  2238. llama_model_loader & ml,
  2239. llama_model & model,
  2240. int n_gpu_layers,
  2241. int main_gpu,
  2242. const float * tensor_split,
  2243. bool use_mlock,
  2244. llama_progress_callback progress_callback,
  2245. void * progress_callback_user_data) {
  2246. model.t_start_us = ggml_time_us();
  2247. auto & ctx = model.ctx;
  2248. auto & hparams = model.hparams;
  2249. model.n_gpu_layers = n_gpu_layers;
  2250. size_t ctx_size;
  2251. size_t mmapped_size;
  2252. ml.calc_sizes(ctx_size, mmapped_size);
  2253. LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, ctx_size/1024.0/1024.0);
  2254. // create the ggml context
  2255. {
  2256. model.buf.resize(ctx_size);
  2257. if (use_mlock) {
  2258. model.mlock_buf.init (model.buf.data);
  2259. model.mlock_buf.grow_to(model.buf.size);
  2260. }
  2261. struct ggml_init_params params = {
  2262. /*.mem_size =*/ model.buf.size,
  2263. /*.mem_buffer =*/ model.buf.data,
  2264. /*.no_alloc =*/ ml.use_mmap,
  2265. };
  2266. model.ctx = ggml_init(params);
  2267. if (!model.ctx) {
  2268. throw std::runtime_error(format("ggml_init() failed"));
  2269. }
  2270. }
  2271. (void) main_gpu;
  2272. enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU;
  2273. enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU;
  2274. #ifdef GGML_USE_CUBLAS
  2275. if (ggml_cublas_loaded()) {
  2276. LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__);
  2277. ggml_cuda_set_main_device(main_gpu);
  2278. llama_backend_offload = GGML_BACKEND_GPU;
  2279. llama_backend_offload_split = GGML_BACKEND_GPU_SPLIT;
  2280. }
  2281. #elif defined(GGML_USE_CLBLAST)
  2282. LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
  2283. llama_backend_offload = GGML_BACKEND_GPU;
  2284. llama_backend_offload_split = GGML_BACKEND_GPU;
  2285. #endif
  2286. // prepare memory for the weights
  2287. size_t vram_weights = 0;
  2288. {
  2289. const int64_t n_embd = hparams.n_embd;
  2290. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  2291. const int64_t n_layer = hparams.n_layer;
  2292. const int64_t n_vocab = hparams.n_vocab;
  2293. const auto tn = LLM_TN(model.arch);
  2294. switch (model.arch) {
  2295. case LLM_ARCH_LLAMA:
  2296. case LLM_ARCH_REFACT:
  2297. {
  2298. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2299. // output
  2300. {
  2301. ggml_backend_type backend_norm;
  2302. ggml_backend_type backend_output;
  2303. if (n_gpu_layers > int(n_layer)) {
  2304. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  2305. // on Windows however this is detrimental unless everything is on the GPU
  2306. #ifndef _WIN32
  2307. backend_norm = llama_backend_offload;
  2308. #else
  2309. backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload;
  2310. #endif // _WIN32
  2311. backend_output = llama_backend_offload_split;
  2312. } else {
  2313. backend_norm = GGML_BACKEND_CPU;
  2314. backend_output = GGML_BACKEND_CPU;
  2315. }
  2316. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2317. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2318. if (backend_norm == GGML_BACKEND_GPU) {
  2319. vram_weights += ggml_nbytes(model.output_norm);
  2320. }
  2321. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  2322. vram_weights += ggml_nbytes(model.output);
  2323. }
  2324. }
  2325. const uint32_t n_ff = hparams.n_ff;
  2326. const int i_gpu_start = n_layer - n_gpu_layers;
  2327. model.layers.resize(n_layer);
  2328. for (uint32_t i = 0; i < n_layer; ++i) {
  2329. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2330. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2331. auto & layer = model.layers[i];
  2332. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2333. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  2334. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2335. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2336. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2337. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2338. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  2339. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2340. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2341. if (backend == GGML_BACKEND_GPU) {
  2342. vram_weights +=
  2343. ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
  2344. ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
  2345. ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
  2346. }
  2347. }
  2348. } break;
  2349. case LLM_ARCH_BAICHUAN:
  2350. {
  2351. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2352. {
  2353. ggml_backend_type backend_norm;
  2354. ggml_backend_type backend_output;
  2355. if (n_gpu_layers > int(n_layer)) {
  2356. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  2357. // on Windows however this is detrimental unless everything is on the GPU
  2358. #ifndef _WIN32
  2359. backend_norm = llama_backend_offload;
  2360. #else
  2361. backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload;
  2362. #endif // _WIN32
  2363. backend_output = llama_backend_offload_split;
  2364. } else {
  2365. backend_norm = GGML_BACKEND_CPU;
  2366. backend_output = GGML_BACKEND_CPU;
  2367. }
  2368. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2369. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2370. if (backend_norm == GGML_BACKEND_GPU) {
  2371. vram_weights += ggml_nbytes(model.output_norm);
  2372. }
  2373. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  2374. vram_weights += ggml_nbytes(model.output);
  2375. }
  2376. }
  2377. const uint32_t n_ff = hparams.n_ff;
  2378. const int i_gpu_start = n_layer - n_gpu_layers;
  2379. model.layers.resize(n_layer);
  2380. for (uint32_t i = 0; i < n_layer; ++i) {
  2381. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2382. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2383. auto & layer = model.layers[i];
  2384. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2385. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  2386. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2387. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2388. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2389. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2390. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  2391. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2392. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2393. if (backend == GGML_BACKEND_GPU) {
  2394. vram_weights +=
  2395. ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
  2396. ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
  2397. ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
  2398. }
  2399. }
  2400. } break;
  2401. case LLM_ARCH_FALCON:
  2402. {
  2403. // TODO: CPU-only for now
  2404. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2405. // output
  2406. {
  2407. ggml_backend_type backend_norm;
  2408. ggml_backend_type backend_output;
  2409. if (n_gpu_layers > int(n_layer)) {
  2410. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  2411. // on Windows however this is detrimental unless everything is on the GPU
  2412. #ifndef _WIN32
  2413. backend_norm = llama_backend_offload;
  2414. #else
  2415. backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload;
  2416. #endif // _WIN32
  2417. backend_output = llama_backend_offload_split;
  2418. } else {
  2419. backend_norm = GGML_BACKEND_CPU;
  2420. backend_output = GGML_BACKEND_CPU;
  2421. }
  2422. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2423. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2424. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2425. if (backend_norm == GGML_BACKEND_GPU) {
  2426. vram_weights += ggml_nbytes(model.output_norm);
  2427. vram_weights += ggml_nbytes(model.output_norm_b);
  2428. }
  2429. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  2430. vram_weights += ggml_nbytes(model.output);
  2431. }
  2432. }
  2433. const uint32_t n_ff = hparams.n_ff;
  2434. const int i_gpu_start = n_layer - n_gpu_layers;
  2435. model.layers.resize(n_layer);
  2436. for (uint32_t i = 0; i < n_layer; ++i) {
  2437. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2438. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2439. auto & layer = model.layers[i];
  2440. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2441. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2442. if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) {
  2443. layer.attn_norm_2 = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, backend);
  2444. layer.attn_norm_2_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, backend);
  2445. if (backend == GGML_BACKEND_GPU) {
  2446. vram_weights += ggml_nbytes(layer.attn_norm_2);
  2447. vram_weights += ggml_nbytes(layer.attn_norm_2_b);
  2448. }
  2449. }
  2450. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2451. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2452. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2453. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2454. if (backend == GGML_BACKEND_GPU) {
  2455. vram_weights +=
  2456. ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
  2457. ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.wo) +
  2458. ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
  2459. }
  2460. }
  2461. } break;
  2462. case LLM_ARCH_STARCODER:
  2463. {
  2464. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2465. model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU);
  2466. // output
  2467. {
  2468. ggml_backend_type backend_norm;
  2469. ggml_backend_type backend_output;
  2470. if (n_gpu_layers > int(n_layer)) {
  2471. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  2472. // on Windows however this is detrimental unless everything is on the GPU
  2473. #ifndef _WIN32
  2474. backend_norm = llama_backend_offload;
  2475. #else
  2476. backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload;
  2477. #endif // _WIN32
  2478. backend_output = llama_backend_offload_split;
  2479. } else {
  2480. backend_norm = GGML_BACKEND_CPU;
  2481. backend_output = GGML_BACKEND_CPU;
  2482. }
  2483. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2484. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2485. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2486. if (backend_norm == GGML_BACKEND_GPU) {
  2487. vram_weights += ggml_nbytes(model.output_norm);
  2488. vram_weights += ggml_nbytes(model.output_norm_b);
  2489. }
  2490. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  2491. vram_weights += ggml_nbytes(model.output);
  2492. }
  2493. }
  2494. const uint32_t n_ff = hparams.n_ff;
  2495. const int i_gpu_start = n_layer - n_gpu_layers;
  2496. model.layers.resize(n_layer);
  2497. for (uint32_t i = 0; i < n_layer; ++i) {
  2498. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2499. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2500. auto & layer = model.layers[i];
  2501. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2502. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2503. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2504. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  2505. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2506. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  2507. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2508. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2509. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  2510. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  2511. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2512. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  2513. if (backend == GGML_BACKEND_GPU) {
  2514. vram_weights +=
  2515. ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
  2516. ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) +
  2517. ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) +
  2518. ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) +
  2519. ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b) +
  2520. ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b);
  2521. }
  2522. }
  2523. } break;
  2524. case LLM_ARCH_PERSIMMON:
  2525. {
  2526. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2527. {
  2528. ggml_backend_type backend_norm;
  2529. ggml_backend_type backend_output;
  2530. if (n_gpu_layers > int(n_layer)) {
  2531. #ifdef GGML_USE_CUBLAS
  2532. if (n_gpu_layers > int(n_layer + 1)) {
  2533. LLAMA_LOG_ERROR("%s: CUDA backend missing Persimmon CUDA ops, can offload at most %ld layers. See: https://github.com/ggerganov/llama.cpp/issues/4038\n",
  2534. __func__, n_layer + 1);
  2535. throw std::runtime_error("Persimmon CUDA offload failed");
  2536. }
  2537. #endif
  2538. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  2539. // on Windows however this is detrimental unless everything is on the GPU
  2540. #ifndef _WIN32
  2541. backend_norm = llama_backend_offload;
  2542. #else
  2543. backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload;
  2544. #endif // _WIN32
  2545. backend_output = llama_backend_offload_split;
  2546. } else {
  2547. backend_norm = GGML_BACKEND_CPU;
  2548. backend_output = GGML_BACKEND_CPU;
  2549. }
  2550. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2551. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2552. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2553. if (backend_norm == GGML_BACKEND_GPU) {
  2554. vram_weights += ggml_nbytes(model.output_norm);
  2555. vram_weights += ggml_nbytes(model.output_norm_b);
  2556. }
  2557. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  2558. vram_weights += ggml_nbytes(model.output);
  2559. }
  2560. }
  2561. const uint32_t n_ff = hparams.n_ff;
  2562. const int i_gpu_start = n_layer - n_gpu_layers;
  2563. model.layers.resize(n_layer);
  2564. for (uint32_t i = 0; i < n_layer; ++i) {
  2565. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload;
  2566. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split;
  2567. auto & layer = model.layers[i];
  2568. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2569. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2570. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2571. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  2572. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2573. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  2574. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  2575. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  2576. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2577. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  2578. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2579. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2580. layer.attn_q_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}, backend);
  2581. layer.attn_q_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}, backend);
  2582. layer.attn_k_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64}, backend);
  2583. layer.attn_k_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}, backend);
  2584. }
  2585. } break;
  2586. case LLM_ARCH_BLOOM:
  2587. {
  2588. // TODO: CPU-only for now
  2589. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2590. model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU);
  2591. model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU);
  2592. // output
  2593. {
  2594. ggml_backend_type backend_norm;
  2595. ggml_backend_type backend_output;
  2596. if (n_gpu_layers > int(n_layer)) {
  2597. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  2598. // on Windows however this is detrimental unless everything is on the GPU
  2599. #ifndef _WIN32
  2600. backend_norm = llama_backend_offload;
  2601. #else
  2602. backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload;
  2603. #endif // _WIN32
  2604. backend_output = llama_backend_offload_split;
  2605. } else {
  2606. backend_norm = GGML_BACKEND_CPU;
  2607. backend_output = GGML_BACKEND_CPU;
  2608. }
  2609. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2610. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2611. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2612. if (backend_norm == GGML_BACKEND_GPU) {
  2613. vram_weights += ggml_nbytes(model.output_norm);
  2614. vram_weights += ggml_nbytes(model.output_norm_b);
  2615. }
  2616. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  2617. vram_weights += ggml_nbytes(model.output);
  2618. }
  2619. }
  2620. const uint32_t n_ff = hparams.n_ff;
  2621. const int i_gpu_start = n_layer - n_gpu_layers;
  2622. model.layers.resize(n_layer);
  2623. for (uint32_t i = 0; i < n_layer; ++i) {
  2624. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2625. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2626. auto & layer = model.layers[i];
  2627. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2628. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2629. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2630. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  2631. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2632. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  2633. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2634. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2635. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  2636. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  2637. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2638. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  2639. if (backend == GGML_BACKEND_GPU) {
  2640. vram_weights +=
  2641. ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
  2642. ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) +
  2643. ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) +
  2644. ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) +
  2645. ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b) +
  2646. ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b);
  2647. }
  2648. }
  2649. } break;
  2650. case LLM_ARCH_MPT:
  2651. {
  2652. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2653. // output
  2654. {
  2655. ggml_backend_type backend_norm;
  2656. ggml_backend_type backend_output;
  2657. if (n_gpu_layers > int(n_layer)) {
  2658. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  2659. // on Windows however this is detrimental unless everything is on the GPU
  2660. #ifndef _WIN32
  2661. backend_norm = llama_backend_offload;
  2662. #else
  2663. backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload;
  2664. #endif // _WIN32
  2665. backend_output = llama_backend_offload_split;
  2666. } else {
  2667. backend_norm = GGML_BACKEND_CPU;
  2668. backend_output = GGML_BACKEND_CPU;
  2669. }
  2670. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2671. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2672. if (backend_norm == GGML_BACKEND_GPU) {
  2673. vram_weights += ggml_nbytes(model.output_norm);
  2674. }
  2675. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  2676. vram_weights += ggml_nbytes(model.output);
  2677. }
  2678. }
  2679. const uint32_t n_ff = hparams.n_ff;
  2680. const int i_gpu_start = n_layer - n_gpu_layers;
  2681. model.layers.resize(n_layer);
  2682. for (uint32_t i = 0; i < n_layer; ++i) {
  2683. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2684. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2685. auto & layer = model.layers[i];
  2686. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2687. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2688. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2689. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2690. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2691. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2692. if (backend == GGML_BACKEND_GPU) {
  2693. vram_weights +=
  2694. ggml_nbytes(layer.attn_norm) +
  2695. ggml_nbytes(layer.wqkv) +
  2696. ggml_nbytes(layer.wo) +
  2697. ggml_nbytes(layer.ffn_norm) +
  2698. ggml_nbytes(layer.ffn_down) +
  2699. ggml_nbytes(layer.ffn_up);
  2700. }
  2701. }
  2702. } break;
  2703. case LLM_ARCH_STABLELM:
  2704. {
  2705. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2706. // output
  2707. {
  2708. ggml_backend_type backend_norm;
  2709. ggml_backend_type backend_output;
  2710. if (n_gpu_layers > int(n_layer)) {
  2711. // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
  2712. // on Windows however this is detrimental unless everything is on the GPU
  2713. #ifndef _WIN32
  2714. backend_norm = llama_backend_offload;
  2715. #else
  2716. backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload;
  2717. #endif // _WIN32
  2718. backend_output = llama_backend_offload_split;
  2719. } else {
  2720. backend_norm = GGML_BACKEND_CPU;
  2721. backend_output = GGML_BACKEND_CPU;
  2722. }
  2723. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2724. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2725. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2726. if (backend_norm == GGML_BACKEND_GPU) {
  2727. vram_weights += ggml_nbytes(model.output_norm);
  2728. }
  2729. if (backend_output == GGML_BACKEND_GPU_SPLIT) {
  2730. vram_weights += ggml_nbytes(model.output);
  2731. }
  2732. }
  2733. const uint32_t n_ff = hparams.n_ff;
  2734. const int i_gpu_start = n_layer - n_gpu_layers;
  2735. model.layers.resize(n_layer);
  2736. for (uint32_t i = 0; i < n_layer; ++i) {
  2737. /*
  2738. llama_model_loader: - tensor 4: blk.0.attn_output.weight f16 [ 2560, 2560, 1, 1 ]
  2739. */
  2740. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2741. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2742. auto & layer = model.layers[i];
  2743. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2744. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2745. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  2746. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2747. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2748. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2749. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2750. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2751. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  2752. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2753. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2754. if (backend == GGML_BACKEND_GPU) {
  2755. vram_weights +=
  2756. ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
  2757. ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
  2758. ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
  2759. }
  2760. }
  2761. } break;
  2762. default:
  2763. throw std::runtime_error("unknown architecture");
  2764. }
  2765. }
  2766. ml.done_getting_tensors();
  2767. // print memory requirements
  2768. {
  2769. // this is the total memory required to run the inference
  2770. size_t mem_required =
  2771. ctx_size +
  2772. mmapped_size - vram_weights; // weights in VRAM not in memory
  2773. LLAMA_LOG_INFO("%s: mem required = %7.2f MiB\n", __func__, mem_required / 1024.0 / 1024.0);
  2774. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
  2775. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  2776. LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  2777. if (n_gpu_layers > (int) hparams.n_layer) {
  2778. LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
  2779. }
  2780. #ifdef GGML_USE_CUBLAS
  2781. const int max_backend_supported_layers = hparams.n_layer + 3;
  2782. const int max_offloadable_layers = hparams.n_layer + 3;
2783. #elif defined(GGML_USE_CLBLAST)
  2784. const int max_backend_supported_layers = hparams.n_layer + 1;
  2785. const int max_offloadable_layers = hparams.n_layer + 1;
  2786. #endif // GGML_USE_CUBLAS
  2787. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
  2788. LLAMA_LOG_INFO("%s: VRAM used: %.2f MiB\n", __func__, vram_weights / 1024.0 / 1024.0);
  2789. #else
  2790. (void) n_gpu_layers;
  2791. #endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
  2792. }
  2793. // populate `tensors_by_name`
  2794. for (int i = 0; i < ml.n_tensors; ++i) {
  2795. struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i));
  2796. model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
  2797. }
  2798. (void) tensor_split;
  2799. #ifdef GGML_USE_CUBLAS
  2800. {
  2801. ggml_cuda_set_tensor_split(tensor_split);
  2802. }
  2803. #endif
  2804. ml.load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);
  2805. if (progress_callback) {
  2806. progress_callback(1.0f, progress_callback_user_data);
  2807. }
  2808. model.mapping = std::move(ml.mapping);
2809. // loading time will be recalculated after the first eval, so
2810. // we take page faults deferred by mmap() into consideration
  2811. model.t_load_us = ggml_time_us() - model.t_start_us;
  2812. }
  2813. static bool llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) {
  2814. try {
  2815. llama_model_loader ml(fname, params.use_mmap);
  2816. model.hparams.vocab_only = params.vocab_only;
  2817. llm_load_arch (ml, model);
  2818. llm_load_hparams(ml, model);
  2819. llm_load_vocab (ml, model);
  2820. llm_load_print_meta(ml, model);
  2821. if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
  2822. throw std::runtime_error("vocab size mismatch");
  2823. }
  2824. if (params.vocab_only) {
  2825. LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
  2826. return true;
  2827. }
  2828. llm_load_tensors(
  2829. ml, model, params.n_gpu_layers, params.main_gpu, params.tensor_split, params.use_mlock,
  2830. params.progress_callback, params.progress_callback_user_data
  2831. );
  2832. } catch (const std::exception & err) {
  2833. LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
  2834. return false;
  2835. }
  2836. return true;
  2837. }
  2838. //
  2839. // llm_build
  2840. //
  2841. using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;
  2842. enum llm_rope_type {
  2843. LLM_ROPE,
  2844. LLM_ROPE_NEOX,
  2845. LLM_ROPE_GLM,
  2846. };
  2847. enum llm_ffn_op_type {
  2848. LLM_FFN_SILU,
  2849. LLM_FFN_GELU,
  2850. LLM_FFN_RELU,
  2851. LLM_FFN_RELU_SQR,
  2852. };
  2853. enum llm_ffn_gate_type {
  2854. LLM_FFN_SEQ,
  2855. LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
  2856. };
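// Sketch of the two gate orderings, as implemented by llm_build_ffn() below
// (x is the FFN input, act is the selected llm_ffn_op_type, biases omitted):
//   LLM_FFN_SEQ: down(act(gate(up(x))))
//   LLM_FFN_PAR: down(act(gate(x)) * up(x))   // e.g. SwiGLU when act == LLM_FFN_SILU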
  2857. enum llm_norm_type {
  2858. LLM_NORM,
  2859. LLM_NORM_RMS,
  2860. };
  2861. static struct ggml_tensor * llm_build_inp_embd(
  2862. struct ggml_context * ctx,
  2863. const llama_hparams & hparams,
  2864. const llama_batch & batch,
  2865. struct ggml_tensor * tok_embd,
  2866. const llm_build_cb & cb) {
  2867. const int64_t n_embd = hparams.n_embd;
  2868. struct ggml_tensor * inpL;
  2869. if (batch.token) {
  2870. struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
  2871. cb(inp_tokens, "inp_tokens", -1);
  2872. inpL = ggml_get_rows(ctx, tok_embd, inp_tokens);
  2873. } else {
  2874. #ifdef GGML_USE_MPI
  2875. GGML_ASSERT(false && "not implemented");
  2876. #endif
  2877. inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
  2878. }
  2879. return inpL;
  2880. }
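// (note: when batch.token is null, the batch carries precomputed embeddings instead and the
//  caller is expected to fill the [n_embd, n_tokens] tensor created above with them)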
  2881. // Persimmon: n_rot = n_embd_head/2
  2882. // Other: n_rot = n_embd_head
  2883. static void llm_build_k_shift(
  2884. struct ggml_context * ctx,
  2885. const llama_hparams & hparams,
  2886. const llama_cparams & cparams,
  2887. const llama_kv_cache & kv,
  2888. struct ggml_cgraph * graph,
  2889. llm_rope_type type,
  2890. int64_t n_ctx,
  2891. int64_t n_rot,
  2892. float freq_base,
  2893. float freq_scale,
  2894. const llm_build_cb & cb) {
  2895. const int64_t n_layer = hparams.n_layer;
  2896. const int64_t n_head_kv = hparams.n_head_kv;
  2897. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  2898. const int64_t n_embd_head = hparams.n_embd_head();
  2899. const int32_t n_orig_ctx = cparams.n_yarn_orig_ctx;
  2900. const float ext_factor = cparams.yarn_ext_factor;
  2901. const float attn_factor = cparams.yarn_attn_factor;
  2902. const float beta_fast = cparams.yarn_beta_fast;
  2903. const float beta_slow = cparams.yarn_beta_slow;
  2904. GGML_ASSERT(n_embd_head % n_rot == 0);
  2905. struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_ctx);
  2906. cb(K_shift, "K_shift", -1);
  2907. int rope_type = 0;
  2908. switch (type) {
  2909. case LLM_ROPE: rope_type = 0; break;
  2910. case LLM_ROPE_NEOX: rope_type = 2; break;
  2911. case LLM_ROPE_GLM: rope_type = 4; break;
  2912. }
  2913. for (int il = 0; il < n_layer; ++il) {
  2914. struct ggml_tensor * tmp =
  2915. // we rotate only the first n_rot dimensions
  2916. ggml_rope_custom_inplace(ctx,
  2917. ggml_view_3d(ctx, kv.k,
  2918. n_rot, n_head_kv, n_ctx,
  2919. ggml_element_size(kv.k)*n_embd_head,
  2920. ggml_element_size(kv.k)*n_embd_gqa,
  2921. ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il),
  2922. K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
  2923. ext_factor, attn_factor, beta_fast, beta_slow);
  2924. cb(tmp, "K_shifted", il);
  2925. ggml_build_forward_expand(graph, tmp);
  2926. }
  2927. }
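// (note: since the RoPE-ed K is what gets cached, a cache shift is realized by applying an
//  extra rotation with the per-cell position deltas in K_shift, rather than by moving data)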
  2928. static void llm_build_kv_store(
  2929. struct ggml_context * ctx,
  2930. const llama_hparams & hparams,
  2931. const llama_kv_cache & kv,
  2932. struct ggml_cgraph * graph,
  2933. struct ggml_tensor * k_cur,
  2934. struct ggml_tensor * v_cur,
  2935. int64_t n_ctx,
  2936. int32_t n_tokens,
  2937. int32_t kv_head,
  2938. const llm_build_cb & cb,
  2939. int64_t il) {
  2940. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  2941. // compute the transposed [n_tokens, n_embd] V matrix
  2942. struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, n_embd_gqa, n_tokens));
  2943. //struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed
  2944. cb(v_cur_t, "v_cur_t", il);
  2945. struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k, n_tokens*n_embd_gqa,
  2946. (ggml_element_size(kv.k)*n_embd_gqa)*(il*n_ctx + kv_head));
  2947. cb(k_cache_view, "k_cache_view", il);
  2948. struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v, n_tokens, n_embd_gqa,
  2949. ( n_ctx)*ggml_element_size(kv.v),
  2950. (il*n_ctx)*ggml_element_size(kv.v)*n_embd_gqa + kv_head*ggml_element_size(kv.v));
  2951. cb(v_cache_view, "v_cache_view", il);
  2952. // important: storing RoPE-ed version of K in the KV cache!
  2953. ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
  2954. ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view));
  2955. }
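// (layout note, as implied by the views above: K is appended per layer as n_tokens contiguous
//  slots of n_embd_gqa values each, while V is stored transposed so that each embedding dimension
//  occupies a row of up to n_ctx cells; llm_build_kqv() below reads both back with matching views)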
  2956. static struct ggml_tensor * llm_build_norm(
  2957. struct ggml_context * ctx,
  2958. struct ggml_tensor * cur,
  2959. const llama_hparams & hparams,
  2960. struct ggml_tensor * mw,
  2961. struct ggml_tensor * mb,
  2962. llm_norm_type type,
  2963. const llm_build_cb & cb,
  2964. int il) {
  2965. switch (type) {
  2966. case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break;
  2967. case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
  2968. }
  2969. if (mw || mb) {
  2970. cb(cur, "norm", il);
  2971. }
  2972. if (mw) {
  2973. cur = ggml_mul(ctx, cur, mw);
  2974. if (mb) {
  2975. cb(cur, "norm_w", il);
  2976. }
  2977. }
  2978. if (mb) {
  2979. cur = ggml_add(ctx, cur, mb);
  2980. }
  2981. return cur;
  2982. }
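// Illustrative usage, a sketch rather than a call copied from this file: RMS-normalize the
// layer input with a weight-only norm, e.g.
//   cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, cb, il);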
  2983. static struct ggml_tensor * llm_build_ffn(
  2984. struct ggml_context * ctx,
  2985. struct ggml_tensor * cur,
  2986. struct ggml_tensor * up,
  2987. struct ggml_tensor * up_b,
  2988. struct ggml_tensor * gate,
  2989. struct ggml_tensor * gate_b,
  2990. struct ggml_tensor * down,
  2991. struct ggml_tensor * down_b,
  2992. llm_ffn_op_type type_op,
  2993. llm_ffn_gate_type type_gate,
  2994. const llm_build_cb & cb,
  2995. int il) {
  2996. struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur);
  2997. cb(tmp, "ffn_up", il);
  2998. if (up_b) {
  2999. tmp = ggml_add(ctx, tmp, up_b);
  3000. cb(tmp, "ffn_up_b", il);
  3001. }
  3002. if (gate) {
  3003. switch (type_gate) {
  3004. case LLM_FFN_SEQ:
  3005. {
  3006. cur = ggml_mul_mat(ctx, gate, tmp);
  3007. cb(cur, "ffn_gate", il);
  3008. } break;
  3009. case LLM_FFN_PAR:
  3010. {
  3011. cur = ggml_mul_mat(ctx, gate, cur);
  3012. cb(cur, "ffn_gate", il);
  3013. } break;
  3014. }
  3015. if (gate_b) {
  3016. cur = ggml_add(ctx, cur, gate_b);
  3017. cb(cur, "ffn_gate_b", il);
  3018. }
  3019. } else {
  3020. cur = tmp;
  3021. }
  3022. switch (type_op) {
  3023. case LLM_FFN_SILU:
  3024. {
  3025. cur = ggml_silu(ctx, cur);
  3026. cb(cur, "ffn_silu", il);
  3027. } break;
  3028. case LLM_FFN_GELU:
  3029. {
  3030. cur = ggml_gelu(ctx, cur);
  3031. cb(cur, "ffn_gelu", il);
  3032. } break;
  3033. case LLM_FFN_RELU:
  3034. {
  3035. cur = ggml_relu(ctx, cur);
  3036. cb(cur, "ffn_relu", il);
  3037. } break;
  3038. case LLM_FFN_RELU_SQR:
  3039. {
  3040. cur = ggml_relu(ctx, cur);
  3041. cb(cur, "ffn_relu", il);
  3042. cur = ggml_sqr(ctx, cur);
  3043. cb(cur, "ffn_sqr(relu)", il);
  3044. } break;
  3045. }
  3046. if (type_gate == LLM_FFN_PAR) {
  3047. cur = ggml_mul(ctx, cur, tmp);
  3048. cb(cur, "ffn_gate_par", il);
  3049. }
  3050. cur = ggml_mul_mat(ctx, down, cur);
  3051. if (down_b) {
  3052. cb(cur, "ffn_down", il);
  3053. }
  3054. if (down_b) {
  3055. cur = ggml_add(ctx, cur, down_b);
  3056. }
  3057. return cur;
  3058. }
  3059. // if max_alibi_bias > 0 then apply ALiBi
  3060. static struct ggml_tensor * llm_build_kqv(
  3061. struct ggml_context * ctx,
  3062. const llama_hparams & hparams,
  3063. const llama_kv_cache & kv,
  3064. struct ggml_tensor * wo,
  3065. struct ggml_tensor * wo_b,
  3066. struct ggml_tensor * q_cur,
  3067. struct ggml_tensor * kq_scale,
  3068. struct ggml_tensor * kq_mask,
  3069. int64_t n_ctx,
  3070. int32_t n_tokens,
  3071. int32_t n_kv,
  3072. float max_alibi_bias,
  3073. const llm_build_cb & cb,
  3074. int il) {
  3075. const int64_t n_embd = hparams.n_embd;
  3076. const int64_t n_head = hparams.n_head;
  3077. const int64_t n_head_kv = hparams.n_head_kv;
  3078. const int64_t n_embd_head = hparams.n_embd_head();
  3079. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  3080. struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
  3081. cb(q, "q", il);
  3082. struct ggml_tensor * k =
  3083. ggml_view_3d(ctx, kv.k,
  3084. n_embd_head, n_kv, n_head_kv,
  3085. ggml_element_size(kv.k)*n_embd_gqa,
  3086. ggml_element_size(kv.k)*n_embd_head,
  3087. ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il);
  3088. cb(k, "k", il);
  3089. struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
  3090. cb(kq, "kq", il);
  3091. kq = ggml_scale(ctx, kq, kq_scale);
  3092. cb(kq, "kq_scaled", il);
  3093. if (max_alibi_bias > 0.0f) {
  3094. // TODO: n_head or n_head_kv
  3095. // TODO: K-shift is likely not working
  3096. // TODO: change to ggml_add
  3097. kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, max_alibi_bias);
  3098. cb(kq, "kq_scaled_alibi", il);
  3099. }
  3100. kq = ggml_add(ctx, kq, kq_mask);
  3101. cb(kq, "kq_masked", il);
  3102. kq = ggml_soft_max(ctx, kq);
  3103. cb(kq, "kq_soft_max", il);
  3104. // split cached v into n_head heads
  3105. struct ggml_tensor * v =
  3106. ggml_view_3d(ctx, kv.v,
  3107. n_kv, n_embd_head, n_head_kv,
  3108. ggml_element_size(kv.v)*n_ctx,
  3109. ggml_element_size(kv.v)*n_ctx*n_embd_head,
  3110. ggml_element_size(kv.v)*n_ctx*n_embd_gqa*il);
  3111. cb(v, "v", il);
  3112. struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
  3113. cb(kqv, "kqv", il);
  3114. struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
  3115. cb(kqv_merged, "kqv_merged", il);
  3116. struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, n_embd, n_tokens);
  3117. cb(cur, "kqv_merged_cont", il);
3118. cur = ggml_mul_mat(ctx, wo, cur);
3119. if (wo_b) {
3120. cb(cur, "kqv_wo", il);
3121. cur = ggml_add(ctx, cur, wo_b);
3122. }
  3125. return cur;
  3126. }
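// Note (sketch): llm_build_kqv() is the shared attention kernel over the KV cache. It views
// the first n_kv cells of layer il in kv.k/kv.v and computes, per head:
//   kq  = soft_max( kq_scale * (K^T q) + kq_mask [ + alibi ] )
//   kqv = V * kq
// The heads are then merged back to [n_embd, n_tokens] and projected with wo (and wo_b if
// present). kq_scale is filled with 1/sqrt(n_embd_head) by the graph callback below.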
  3127. struct llm_build_context {
  3128. const llama_model & model;
  3129. const llama_hparams & hparams;
  3130. const llama_cparams & cparams;
  3131. const llama_batch & batch;
  3132. const llama_kv_cache & kv_self;
  3133. const int64_t n_embd;
  3134. const int64_t n_layer;
  3135. const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
  3136. const int64_t n_head;
  3137. const int64_t n_head_kv;
  3138. const int64_t n_embd_head;
  3139. const int64_t n_embd_gqa;
  3140. const float freq_base;
  3141. const float freq_scale;
  3142. const float ext_factor;
  3143. const float attn_factor;
  3144. const float beta_fast;
  3145. const float beta_slow;
  3146. const float norm_eps;
  3147. const float norm_rms_eps;
  3148. const int32_t n_tokens;
  3149. const int32_t n_kv; // size of KV cache to consider (n_kv <= n_ctx)
  3150. const int32_t kv_head; // index of where we store new KV data in the cache
  3151. const int32_t n_orig_ctx;
  3152. const bool do_rope_shift;
  3153. const llm_build_cb & cb;
  3154. llama_buffer & buf_compute;
  3155. struct ggml_context * ctx0 = nullptr;
  3156. // TODO: consider making the entire interface noexcept
  3157. llm_build_context(
  3158. llama_context & lctx,
  3159. const llama_batch & batch,
  3160. const llm_build_cb & cb,
  3161. bool worst_case) :
  3162. model (lctx.model),
  3163. hparams (model.hparams),
  3164. cparams (lctx.cparams),
  3165. batch (batch),
  3166. kv_self (lctx.kv_self),
  3167. n_embd (hparams.n_embd),
  3168. n_layer (hparams.n_layer),
  3169. n_ctx (cparams.n_ctx),
  3170. n_head (hparams.n_head),
  3171. n_head_kv (hparams.n_head_kv),
  3172. n_embd_head (hparams.n_embd_head()),
  3173. n_embd_gqa (hparams.n_embd_gqa()),
  3174. freq_base (cparams.rope_freq_base),
  3175. freq_scale (cparams.rope_freq_scale),
  3176. ext_factor (cparams.yarn_ext_factor),
  3177. attn_factor (cparams.yarn_attn_factor),
  3178. beta_fast (cparams.yarn_beta_fast),
  3179. beta_slow (cparams.yarn_beta_slow),
  3180. norm_eps (hparams.f_norm_eps),
  3181. norm_rms_eps (hparams.f_norm_rms_eps),
  3182. n_tokens (batch.n_tokens),
  3183. n_kv (worst_case ? n_ctx : kv_self.n),
  3184. kv_head (worst_case ? n_ctx - n_tokens : kv_self.head),
  3185. n_orig_ctx (cparams.n_yarn_orig_ctx),
  3186. do_rope_shift (worst_case || kv_self.has_shift),
  3187. cb (cb),
  3188. buf_compute (lctx.buf_compute) {
  3189. GGML_ASSERT(!!kv_self.ctx);
  3190. // all initializations should be done in init()
  3191. }
  3192. void init() {
  3193. struct ggml_init_params params = {
  3194. /*.mem_size =*/ buf_compute.size,
  3195. /*.mem_buffer =*/ buf_compute.data,
  3196. /*.no_alloc =*/ true,
  3197. };
  3198. ctx0 = ggml_init(params);
  3199. }
  3200. void free() {
  3201. if (ctx0) {
  3202. ggml_free(ctx0);
  3203. ctx0 = nullptr;
  3204. }
  3205. }
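// Note: the intended lifecycle (see llama_build_graph() below) is:
//   llm_build_context llm(lctx, batch, cb, worst_case);
//   llm.init();           // creates ctx0 over buf_compute with no_alloc = true
//   gf = llm.build_xxx(); // one of the per-architecture graph builders
//   llm.free();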
  3206. struct ggml_cgraph * build_llama() {
  3207. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3208. GGML_ASSERT(n_embd_head == hparams.n_rot);
  3209. struct ggml_tensor * cur;
  3210. struct ggml_tensor * inpL;
  3211. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3212. cb(inpL, "inp_embd", -1);
  3213. // inp_pos - contains the positions
  3214. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3215. cb(inp_pos, "inp_pos", -1);
  3216. // KQ_scale
  3217. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3218. cb(KQ_scale, "KQ_scale", -1);
  3219. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3220. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3221. cb(KQ_mask, "KQ_mask", -1);
  3222. // shift the entire K-cache if needed
  3223. if (do_rope_shift) {
  3224. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3225. }
  3226. for (int il = 0; il < n_layer; ++il) {
  3227. struct ggml_tensor * inpSA = inpL;
  3228. // norm
  3229. cur = llm_build_norm(ctx0, inpL, hparams,
  3230. model.layers[il].attn_norm, NULL,
  3231. LLM_NORM_RMS, cb, il);
  3232. cb(cur, "attn_norm", il);
  3233. // self-attention
  3234. {
  3235. // compute Q and K and RoPE them
  3236. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  3237. cb(Qcur, "Qcur", il);
  3238. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  3239. cb(Kcur, "Kcur", il);
  3240. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  3241. cb(Vcur, "Vcur", il);
  3242. Qcur = ggml_rope_custom(
  3243. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  3244. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3245. ext_factor, attn_factor, beta_fast, beta_slow
  3246. );
  3247. cb(Qcur, "Qcur", il);
  3248. Kcur = ggml_rope_custom(
  3249. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  3250. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3251. ext_factor, attn_factor, beta_fast, beta_slow
  3252. );
  3253. cb(Kcur, "Kcur", il);
  3254. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3255. cur = llm_build_kqv(ctx0, hparams, kv_self,
  3256. model.layers[il].wo, NULL,
  3257. Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
  3258. cb(cur, "kqv_out", il);
  3259. }
  3260. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3261. cb(ffn_inp, "ffn_inp", il);
  3262. // feed-forward network
  3263. {
  3264. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3265. model.layers[il].ffn_norm, NULL,
  3266. LLM_NORM_RMS, cb, il);
  3267. cb(cur, "ffn_norm", il);
  3268. cur = llm_build_ffn(ctx0, cur,
  3269. model.layers[il].ffn_up, NULL,
  3270. model.layers[il].ffn_gate, NULL,
  3271. model.layers[il].ffn_down, NULL,
  3272. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  3273. cb(cur, "ffn_out", il);
  3274. }
  3275. cur = ggml_add(ctx0, cur, ffn_inp);
  3276. cb(cur, "l_out", il);
  3277. // input for next layer
  3278. inpL = cur;
  3279. }
  3280. cur = inpL;
  3281. cur = llm_build_norm(ctx0, cur, hparams,
  3282. model.output_norm, NULL,
  3283. LLM_NORM_RMS, cb, -1);
  3284. cb(cur, "result_norm", -1);
  3285. // lm_head
  3286. cur = ggml_mul_mat(ctx0, model.output, cur);
  3287. cb(cur, "result_output", -1);
  3288. ggml_build_forward_expand(gf, cur);
  3289. return gf;
  3290. }
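// Note: all build_*() graphs follow the same skeleton: token/embedding input ->
// per-layer [norm -> self-attention -> residual -> norm -> FFN -> residual] ->
// final norm -> lm_head. The last two graph nodes must keep the names "result_norm" and
// "result_output" - llama_decode_internal() asserts on them when fetching the outputs.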
  3291. struct ggml_cgraph * build_baichuan() {
  3292. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3293. struct ggml_tensor * cur;
  3294. struct ggml_tensor * inpL;
  3295. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3296. cb(inpL, "inp_embd", -1);
  3297. // inp_pos - contains the positions
  3298. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3299. cb(inp_pos, "inp_pos", -1);
  3300. // KQ_scale
  3301. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3302. cb(KQ_scale, "KQ_scale", -1);
  3303. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3304. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3305. cb(KQ_mask, "KQ_mask", -1);
  3306. // shift the entire K-cache if needed
  3307. if (do_rope_shift) {
  3308. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3309. }
  3310. for (int il = 0; il < n_layer; ++il) {
  3311. struct ggml_tensor * inpSA = inpL;
  3312. cur = llm_build_norm(ctx0, inpL, hparams,
  3313. model.layers[il].attn_norm, NULL,
  3314. LLM_NORM_RMS, cb, il);
  3315. cb(cur, "attn_norm", il);
  3316. // self-attention
  3317. {
  3318. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  3319. cb(Qcur, "Qcur", il);
  3320. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  3321. cb(Kcur, "Kcur", il);
  3322. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  3323. cb(Vcur, "Vcur", il);
  3324. switch (model.type) {
  3325. case MODEL_7B:
  3326. Qcur = ggml_rope_custom(
  3327. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  3328. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3329. ext_factor, attn_factor, beta_fast, beta_slow
  3330. );
  3331. Kcur = ggml_rope_custom(
  3332. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  3333. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3334. ext_factor, attn_factor, beta_fast, beta_slow
  3335. );
  3336. break;
  3337. case MODEL_13B:
  3338. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
  3339. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
  3340. break;
  3341. default:
  3342. GGML_ASSERT(false);
  3343. }
  3344. cb(Qcur, "Qcur", il);
  3345. cb(Kcur, "Kcur", il);
  3346. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3347. // apply ALiBi for 13B model
  3348. const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f;
  3349. cur = llm_build_kqv(ctx0, hparams, kv_self,
  3350. model.layers[il].wo, NULL,
  3351. Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, cb, il);
  3352. cb(cur, "kqv_out", il);
  3353. }
  3354. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3355. cb(ffn_inp, "ffn_inp", il);
  3356. // feed-forward network
  3357. {
  3358. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3359. model.layers[il].ffn_norm, NULL,
  3360. LLM_NORM_RMS, cb, il);
  3361. cb(cur, "ffn_norm", il);
  3362. cur = llm_build_ffn(ctx0, cur,
  3363. model.layers[il].ffn_up, NULL,
  3364. model.layers[il].ffn_gate, NULL,
  3365. model.layers[il].ffn_down, NULL,
  3366. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  3367. cb(cur, "ffn_out", il);
  3368. }
  3369. cur = ggml_add(ctx0, cur, ffn_inp);
  3370. cb(cur, "l_out", il);
  3371. // input for next layer
  3372. inpL = cur;
  3373. }
  3374. cur = inpL;
  3375. cur = llm_build_norm(ctx0, cur, hparams,
  3376. model.output_norm, NULL,
  3377. LLM_NORM_RMS, cb, -1);
  3378. cb(cur, "result_norm", -1);
  3379. // lm_head
  3380. cur = ggml_mul_mat(ctx0, model.output, cur);
  3381. cb(cur, "result_output", -1);
  3382. ggml_build_forward_expand(gf, cur);
  3383. return gf;
  3384. }
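// Note: Baichuan shares the LLaMA block structure, but positional information differs by
// model size: the 7B variant applies RoPE to Q/K, while the 13B variant only reshapes them
// and relies on ALiBi (max_alibi_bias = 8.0f) inside llm_build_kqv().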
  3385. struct ggml_cgraph * build_falcon() {
  3386. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3387. struct ggml_tensor * cur;
  3388. struct ggml_tensor * inpL;
  3389. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3390. cb(inpL, "inp_embd", -1);
  3391. // inp_pos - contains the positions
  3392. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3393. cb(inp_pos, "inp_pos", -1);
  3394. // KQ_scale
  3395. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3396. cb(KQ_scale, "KQ_scale", -1);
  3397. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3398. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3399. cb(KQ_mask, "KQ_mask", -1);
  3400. // shift the entire K-cache if needed
  3401. if (do_rope_shift) {
  3402. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3403. }
  3404. for (int il = 0; il < n_layer; ++il) {
  3405. struct ggml_tensor * attn_norm;
  3406. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  3407. model.layers[il].attn_norm,
  3408. model.layers[il].attn_norm_b,
  3409. LLM_NORM, cb, il);
  3410. cb(attn_norm, "attn_norm", il);
  3411. // self-attention
  3412. {
  3413. if (model.layers[il].attn_norm_2) {
  3414. // Falcon-40B
  3415. cur = llm_build_norm(ctx0, inpL, hparams,
  3416. model.layers[il].attn_norm_2,
  3417. model.layers[il].attn_norm_2_b,
  3418. LLM_NORM, cb, il);
  3419. cb(cur, "attn_norm_2", il);
  3420. } else {
  3421. cur = attn_norm;
  3422. }
  3423. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  3424. cb(cur, "wqkv", il);
  3425. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  3426. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  3427. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  3428. cb(Qcur, "Qcur", il);
  3429. cb(Kcur, "Kcur", il);
  3430. cb(Vcur, "Vcur", il);
  3431. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3432. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  3433. // using mode = 2 for neox mode
  3434. Qcur = ggml_rope_custom(
  3435. ctx0, Qcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  3436. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  3437. );
  3438. cb(Qcur, "Qcur", il);
  3439. Kcur = ggml_rope_custom(
  3440. ctx0, Kcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  3441. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  3442. );
  3443. cb(Kcur, "Kcur", il);
  3444. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3445. cur = llm_build_kqv(ctx0, hparams, kv_self,
  3446. model.layers[il].wo, NULL,
  3447. Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
  3448. cb(cur, "kqv_out", il);
  3449. }
  3450. struct ggml_tensor * ffn_inp = cur;
  3451. // feed forward
  3452. {
  3453. cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result
  3454. model.layers[il].ffn_up, NULL,
  3455. NULL, NULL,
  3456. model.layers[il].ffn_down, NULL,
  3457. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  3458. cb(cur, "ffn_out", il);
  3459. }
  3460. cur = ggml_add(ctx0, cur, ffn_inp);
  3461. cb(cur, "l_out", il);
  3462. cur = ggml_add(ctx0, cur, inpL);
  3463. cb(cur, "l_out", il);
  3464. // input for next layer
  3465. inpL = cur;
  3466. }
  3467. cur = inpL;
  3468. // norm
  3469. cur = llm_build_norm(ctx0, cur, hparams,
  3470. model.output_norm,
  3471. model.output_norm_b,
  3472. LLM_NORM, cb, -1);
  3473. cb(cur, "result_norm", -1);
  3474. cur = ggml_mul_mat(ctx0, model.output, cur);
  3475. cb(cur, "result_output", -1);
  3476. ggml_build_forward_expand(gf, cur);
  3477. return gf;
  3478. }
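// Note: Falcon uses a parallel residual layout: the FFN reads the attention-norm output
// (not the attention result), and both the attention and FFN outputs are added back to the
// layer input. Falcon-40B additionally has a second attention norm (attn_norm_2) feeding
// the fused QKV projection.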
  3479. struct ggml_cgraph * build_starcoder() {
  3480. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3481. struct ggml_tensor * cur;
  3482. struct ggml_tensor * pos;
  3483. struct ggml_tensor * inpL;
  3484. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3485. cb(inpL, "inp_embd", -1);
  3486. // inp_pos - contains the positions
  3487. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3488. cb(inp_pos, "inp_pos", -1);
  3489. // KQ_scale
  3490. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3491. cb(KQ_scale, "KQ_scale", -1);
  3492. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3493. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3494. cb(KQ_mask, "KQ_mask", -1);
  3495. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  3496. cb(pos, "pos_embd", -1);
  3497. inpL = ggml_add(ctx0, inpL, pos);
  3498. cb(inpL, "inpL", -1);
  3499. for (int il = 0; il < n_layer; ++il) {
  3500. cur = llm_build_norm(ctx0, inpL, hparams,
  3501. model.layers[il].attn_norm,
  3502. model.layers[il].attn_norm_b,
  3503. LLM_NORM, cb, il);
  3504. cb(cur, "attn_norm", il);
  3505. // self-attention
  3506. {
  3507. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  3508. cb(cur, "wqkv", il);
  3509. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  3510. cb(cur, "bqkv", il);
  3511. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  3512. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  3513. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  3514. cb(Qcur, "Qcur", il);
  3515. cb(Kcur, "Kcur", il);
  3516. cb(Vcur, "Vcur", il);
  3517. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3518. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3519. cur = llm_build_kqv(ctx0, hparams, kv_self,
  3520. model.layers[il].wo, model.layers[il].bo,
  3521. Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
  3522. cb(cur, "kqv_out", il);
  3523. }
  3524. // add the input
  3525. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  3526. cb(ffn_inp, "ffn_inp", il);
  3527. // FF
  3528. {
  3529. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3530. model.layers[il].ffn_norm,
  3531. model.layers[il].ffn_norm_b,
  3532. LLM_NORM, cb, il);
  3533. cb(cur, "ffn_norm", il);
  3534. cur = llm_build_ffn(ctx0, cur,
  3535. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  3536. NULL, NULL,
  3537. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  3538. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  3539. cb(cur, "ffn_out", il);
  3540. }
  3541. inpL = ggml_add(ctx0, cur, ffn_inp);
  3542. cb(inpL, "l_out", il);
  3543. }
  3544. cur = llm_build_norm(ctx0, inpL, hparams,
  3545. model.output_norm,
  3546. model.output_norm_b,
  3547. LLM_NORM, cb, -1);
  3548. cb(cur, "result_norm", -1);
  3549. cur = ggml_mul_mat(ctx0, model.output, cur);
  3550. cb(cur, "result_output", -1);
  3551. ggml_build_forward_expand(gf, cur);
  3552. return gf;
  3553. }
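// Note: StarCoder uses learned absolute position embeddings (pos_embd rows added to the
// token embeddings) instead of RoPE, a fused QKV projection with bias, and a GELU FFN with
// biases on the up/down projections.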
  3554. struct ggml_cgraph * build_persimmon() {
  3555. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3556. const int64_t n_rot = n_embd_head / 2;
  3557. struct ggml_tensor * cur;
  3558. struct ggml_tensor * inpL;
  3559. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3560. cb(inpL, "imp_embd", -1);
  3561. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3562. cb(inp_pos, "inp_pos", -1);
  3563. // KQ_scale
  3564. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3565. cb(KQ_scale, "KQ_scale", -1);
  3566. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3567. cb(KQ_mask, "KQ_mask", -1);
  3568. if (do_rope_shift) {
  3569. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3570. }
  3571. for (int il = 0; il < n_layer; ++il) {
  3572. struct ggml_tensor * residual = inpL;
  3573. cur = llm_build_norm(ctx0, inpL, hparams,
  3574. model.layers[il].attn_norm,
  3575. model.layers[il].attn_norm_b,
  3576. LLM_NORM, cb, il);
  3577. cb(cur, "attn_norm", il);
  3578. // self attention
  3579. {
  3580. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  3581. cb(cur, "wqkv", il);
  3582. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  3583. cb(cur, "bqkv", il);
  3584. // split qkv
  3585. GGML_ASSERT(n_head_kv == n_head);
  3586. struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens);
  3587. cb(tmpqkv, "tmpqkv", il);
  3588. struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2));
  3589. cb(tmpqkv_perm, "tmpqkv", il);
  3590. struct ggml_tensor * tmpq = ggml_view_3d(
  3591. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  3592. ggml_element_size(tmpqkv_perm) * n_embd_head,
  3593. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  3594. 0
  3595. );
  3596. cb(tmpq, "tmpq", il);
  3597. struct ggml_tensor * tmpk = ggml_view_3d(
  3598. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  3599. ggml_element_size(tmpqkv_perm) * n_embd_head,
  3600. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  3601. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens
  3602. );
  3603. cb(tmpk, "tmpk", il);
  3604. // Q/K Layernorm
  3605. tmpq = llm_build_norm(ctx0, tmpq, hparams,
  3606. model.layers[il].attn_q_norm,
  3607. model.layers[il].attn_q_norm_b,
  3608. LLM_NORM, cb, il);
  3609. cb(tmpq, "tmpq", il);
  3610. tmpk = llm_build_norm(ctx0, tmpk, hparams,
  3611. model.layers[il].attn_k_norm,
  3612. model.layers[il].attn_k_norm_b,
  3613. LLM_NORM, cb, il);
  3614. cb(tmpk, "tmpk", il);
  3615. // RoPE the first n_rot of q/k, pass the other half, and concat.
  3616. struct ggml_tensor * qrot = ggml_view_3d(
  3617. ctx0, tmpq, n_rot, n_head, n_tokens,
  3618. ggml_element_size(tmpq) * n_embd_head,
  3619. ggml_element_size(tmpq) * n_embd_head * n_head,
  3620. 0
  3621. );
  3622. cb(qrot, "qrot", il);
  3623. struct ggml_tensor * krot = ggml_view_3d(
  3624. ctx0, tmpk, n_rot, n_head, n_tokens,
  3625. ggml_element_size(tmpk) * n_embd_head,
  3626. ggml_element_size(tmpk) * n_embd_head * n_head,
  3627. 0
  3628. );
  3629. cb(krot, "krot", il);
3630. // get the second half of tmpq, i.e. tmpq[n_rot:, :, :]
  3631. struct ggml_tensor * qpass = ggml_view_3d(
  3632. ctx0, tmpq, n_rot, n_head, n_tokens,
  3633. ggml_element_size(tmpq) * n_embd_head,
  3634. ggml_element_size(tmpq) * n_embd_head * n_head,
  3635. ggml_element_size(tmpq) * n_rot
  3636. );
  3637. cb(qpass, "qpass", il);
  3638. struct ggml_tensor * kpass = ggml_view_3d(
  3639. ctx0, tmpk, n_rot, n_head, n_tokens,
  3640. ggml_element_size(tmpk) * n_embd_head,
  3641. ggml_element_size(tmpk) * n_embd_head * n_head,
  3642. ggml_element_size(tmpk) * n_rot
  3643. );
  3644. cb(kpass, "kpass", il);
  3645. struct ggml_tensor * qrotated = ggml_rope_custom(
  3646. ctx0, qrot, inp_pos, n_rot, 2, 0, n_orig_ctx,
  3647. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  3648. );
  3649. cb(qrotated, "qrotated", il);
  3650. struct ggml_tensor * krotated = ggml_rope_custom(
  3651. ctx0, krot, inp_pos, n_rot, 2, 0, n_orig_ctx,
  3652. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  3653. );
  3654. cb(krotated, "krotated", il);
  3655. // ggml currently only supports concatenation on dim=2
  3656. // so we need to permute qrot, qpass, concat, then permute back.
  3657. qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3));
  3658. cb(qrotated, "qrotated", il);
  3659. krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3));
  3660. cb(krotated, "krotated", il);
  3661. qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3));
  3662. cb(qpass, "qpass", il);
  3663. kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3));
  3664. cb(kpass, "kpass", il);
  3665. struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass);
  3666. cb(Qcur, "Qcur", il);
  3667. struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
  3668. cb(Kcur, "Kcur", il);
  3669. struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3));
  3670. cb(Q, "Q", il);
  3671. Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
  3672. cb(Kcur, "Kcur", il);
  3673. struct ggml_tensor * Vcur = ggml_view_3d(
  3674. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  3675. ggml_element_size(tmpqkv_perm) * n_embd_head,
  3676. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  3677. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2
  3678. );
  3679. cb(Vcur, "Vcur", il);
  3680. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3681. // TODO: not tested, could be broken
  3682. cur = llm_build_kqv(ctx0, hparams, kv_self,
  3683. model.layers[il].wo, model.layers[il].bo,
  3684. Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
  3685. cb(cur, "kqv_out", il);
  3686. }
  3687. struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
  3688. cb(ffn_inp, "ffn_inp", il);
  3689. // feed-forward network
  3690. {
  3691. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3692. model.layers[il].ffn_norm,
  3693. model.layers[il].ffn_norm_b,
  3694. LLM_NORM, cb, il);
  3695. cb(cur, "ffn_norm", il);
  3696. cur = llm_build_ffn(ctx0, cur,
  3697. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  3698. NULL, NULL,
  3699. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  3700. LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);
  3701. cb(cur, "ffn_out", il);
  3702. }
  3703. cur = ggml_add(ctx0, cur, ffn_inp);
  3704. cb(cur, "l_out", il);
  3705. inpL = cur;
  3706. }
  3707. cur = inpL;
  3708. cur = llm_build_norm(ctx0, cur, hparams,
  3709. model.output_norm,
  3710. model.output_norm_b,
  3711. LLM_NORM, cb, -1);
  3712. cb(cur, "result_norm", -1);
  3713. cur = ggml_mul_mat(ctx0, model.output, cur);
  3714. cb(cur, "result_output", -1);
  3715. ggml_build_forward_expand(gf, cur);
  3716. return gf;
  3717. }
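// Note: Persimmon applies LayerNorm to Q/K and uses partial rotary embeddings: only the
// first n_rot = n_embd_head/2 dimensions of each head are rotated, the rest are passed
// through and concatenated back. Because ggml_concat() only works along dim 2, the rotated
// and pass-through halves are permuted before and after the concat.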
  3718. struct ggml_cgraph * build_refact() {
  3719. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3720. struct ggml_tensor * cur;
  3721. struct ggml_tensor * inpL;
  3722. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3723. cb(inpL, "inp_embd", -1);
  3724. // KQ_scale
  3725. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3726. cb(KQ_scale, "KQ_scale", -1);
  3727. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3728. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3729. cb(KQ_mask, "KQ_mask", -1);
  3730. for (int il = 0; il < n_layer; ++il) {
  3731. struct ggml_tensor * inpSA = inpL;
  3732. cur = llm_build_norm(ctx0, inpL, hparams,
  3733. model.layers[il].attn_norm, NULL,
  3734. LLM_NORM_RMS, cb, il);
  3735. cb(cur, "attn_norm", il);
  3736. // self-attention
  3737. {
  3738. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  3739. cb(Qcur, "Qcur", il);
  3740. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  3741. cb(Kcur, "Kcur", il);
  3742. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  3743. cb(Vcur, "Vcur", il);
  3744. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  3745. cb(Kcur, "Kcur", il);
  3746. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3747. cb(Qcur, "Qcur", il);
  3748. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3749. cur = llm_build_kqv(ctx0, hparams, kv_self,
  3750. model.layers[il].wo, NULL,
  3751. Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il);
  3752. cb(cur, "kqv_out", il);
  3753. }
  3754. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3755. cb(ffn_inp, "ffn_inp", il);
  3756. // feed-forward network
  3757. {
  3758. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3759. model.layers[il].ffn_norm, NULL,
  3760. LLM_NORM_RMS, cb, il);
  3761. cb(cur, "ffn_norm", il);
  3762. cur = llm_build_ffn(ctx0, cur,
  3763. model.layers[il].ffn_up, NULL,
  3764. model.layers[il].ffn_gate, NULL,
  3765. model.layers[il].ffn_down, NULL,
  3766. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  3767. cb(cur, "ffn_out", il);
  3768. }
  3769. cur = ggml_add(ctx0, cur, ffn_inp);
  3770. cb(cur, "l_out", il);
  3771. // input for next layer
  3772. inpL = cur;
  3773. }
  3774. cur = inpL;
  3775. cur = llm_build_norm(ctx0, cur, hparams,
  3776. model.output_norm, NULL,
  3777. LLM_NORM_RMS, cb, -1);
  3778. cb(cur, "result_norm", -1);
  3779. // lm_head
  3780. cur = ggml_mul_mat(ctx0, model.output, cur);
  3781. cb(cur, "result_output", -1);
  3782. ggml_build_forward_expand(gf, cur);
  3783. return gf;
  3784. }
  3785. struct ggml_cgraph * build_bloom() {
  3786. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3787. struct ggml_tensor * cur;
  3788. struct ggml_tensor * inpL;
  3789. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3790. cb(inpL, "inp_embd", -1);
  3791. // KQ_scale
  3792. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3793. cb(KQ_scale, "KQ_scale", -1);
  3794. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3795. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3796. cb(KQ_mask, "KQ_mask", -1);
  3797. inpL = llm_build_norm(ctx0, inpL, hparams,
  3798. model.tok_norm,
  3799. model.tok_norm_b,
  3800. LLM_NORM, cb, -1);
  3801. cb(inpL, "inp_norm", -1);
  3802. for (int il = 0; il < n_layer; ++il) {
  3803. cur = llm_build_norm(ctx0, inpL, hparams,
  3804. model.layers[il].attn_norm,
  3805. model.layers[il].attn_norm_b,
  3806. LLM_NORM, cb, il);
  3807. cb(cur, "attn_norm", il);
  3808. // self-attention
  3809. {
  3810. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  3811. cb(cur, "wqkv", il);
  3812. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  3813. cb(cur, "bqkv", il);
  3814. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  3815. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  3816. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  3817. cb(Qcur, "Qcur", il);
  3818. cb(Kcur, "Kcur", il);
  3819. cb(Vcur, "Vcur", il);
  3820. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3821. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3822. cur = llm_build_kqv(ctx0, hparams, kv_self,
  3823. model.layers[il].wo, model.layers[il].bo,
  3824. Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il);
  3825. cb(cur, "kqv_out", il);
  3826. }
  3827. // Add the input
  3828. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  3829. cb(ffn_inp, "ffn_inp", il);
  3830. // FF
  3831. {
  3832. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3833. model.layers[il].ffn_norm,
  3834. model.layers[il].ffn_norm_b,
  3835. LLM_NORM, cb, il);
  3836. cb(cur, "ffn_norm", il);
  3837. cur = llm_build_ffn(ctx0, cur,
  3838. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  3839. NULL, NULL,
  3840. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  3841. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  3842. cb(cur, "ffn_out", il);
  3843. }
  3844. inpL = ggml_add(ctx0, cur, ffn_inp);
  3845. cb(inpL, "l_out", il);
  3846. }
  3847. cur = llm_build_norm(ctx0, inpL, hparams,
  3848. model.output_norm,
  3849. model.output_norm_b,
  3850. LLM_NORM, cb, -1);
  3851. cb(cur, "result_norm", -1);
  3852. cur = ggml_mul_mat(ctx0, model.output, cur);
  3853. cb(cur, "result_output", -1);
  3854. ggml_build_forward_expand(gf, cur);
  3855. return gf;
  3856. }
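// Note: BLOOM normalizes the token embeddings with tok_norm before the first layer and
// relies on ALiBi (max_alibi_bias = 8.0f) instead of positional embeddings, so no inp_pos
// tensor or K-shift is needed here.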
  3857. struct ggml_cgraph * build_mpt() {
  3858. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3859. struct ggml_tensor * cur;
  3860. struct ggml_tensor * inpL;
  3861. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3862. cb(inpL, "inp_embd", -1);
  3863. // KQ_scale
  3864. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3865. cb(KQ_scale, "KQ_scale", -1);
  3866. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3867. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3868. cb(KQ_mask, "KQ_mask", -1);
  3869. for (int il = 0; il < n_layer; ++il) {
  3870. struct ggml_tensor * attn_norm;
  3871. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  3872. model.layers[il].attn_norm,
  3873. NULL,
  3874. LLM_NORM, cb, il);
  3875. cb(attn_norm, "attn_norm", il);
  3876. // self-attention
  3877. {
  3878. cur = attn_norm;
  3879. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  3880. cb(cur, "wqkv", il);
  3881. if (hparams.f_clamp_kqv > 0.0f) {
  3882. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  3883. cb(cur, "wqkv_clamped", il);
  3884. }
  3885. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  3886. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  3887. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  3888. cb(Qcur, "Qcur", il);
  3889. cb(Kcur, "Kcur", il);
  3890. cb(Vcur, "Vcur", il);
  3891. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3892. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3893. cur = llm_build_kqv(ctx0, hparams, kv_self,
  3894. model.layers[il].wo, NULL,
  3895. Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, cb, il);
  3896. cb(cur, "kqv_out", il);
  3897. }
  3898. // Add the input
  3899. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  3900. cb(ffn_inp, "ffn_inp", il);
  3901. // feed forward
  3902. {
  3903. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3904. model.layers[il].ffn_norm,
  3905. NULL,
  3906. LLM_NORM, cb, il);
  3907. cb(cur, "ffn_norm", il);
  3908. cur = llm_build_ffn(ctx0, cur,
  3909. model.layers[il].ffn_up, NULL,
  3910. NULL, NULL,
  3911. model.layers[il].ffn_down, NULL,
  3912. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  3913. cb(cur, "ffn_out", il);
  3914. }
  3915. cur = ggml_add(ctx0, cur, ffn_inp);
  3916. cb(cur, "l_out", il);
  3917. // input for next layer
  3918. inpL = cur;
  3919. }
  3920. cur = inpL;
  3921. cur = llm_build_norm(ctx0, cur, hparams,
  3922. model.output_norm,
  3923. NULL,
  3924. LLM_NORM, cb, -1);
  3925. cb(cur, "result_norm", -1);
  3926. cur = ggml_mul_mat(ctx0, model.output, cur);
  3927. cb(cur, "result_output", -1);
  3928. ggml_build_forward_expand(gf, cur);
  3929. return gf;
  3930. }
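// Note: MPT also uses ALiBi (hparams.f_max_alibi_bias) rather than RoPE, optionally clamps
// the fused QKV activations to +/- f_clamp_kqv, and uses norms without bias terms.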
  3931. struct ggml_cgraph * build_stablelm() {
  3932. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  3933. struct ggml_tensor * cur;
  3934. struct ggml_tensor * inpL;
  3935. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3936. cb(inpL, "inp_embd", -1);
  3937. // inp_pos - contains the positions
  3938. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3939. cb(inp_pos, "inp_pos", -1);
  3940. // KQ_scale
  3941. struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
  3942. cb(KQ_scale, "KQ_scale", -1);
  3943. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3944. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3945. cb(KQ_mask, "KQ_mask", -1);
  3946. // shift the entire K-cache if needed
  3947. if (do_rope_shift) {
  3948. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, hparams.n_rot, freq_base, freq_scale, cb);
  3949. }
  3950. for (int il = 0; il < n_layer; ++il) {
  3951. struct ggml_tensor * inpSA = inpL;
  3952. // norm
  3953. cur = llm_build_norm(ctx0, inpL, hparams,
  3954. model.layers[il].attn_norm,
  3955. model.layers[il].attn_norm_b,
  3956. LLM_NORM, cb, il);
  3957. cb(cur, "attn_norm", il);
  3958. // self-attention
  3959. {
  3960. // compute Q and K and RoPE them
  3961. struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  3962. cb(tmpq, "tmpq", il);
  3963. struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  3964. cb(tmpk, "tmpk", il);
  3965. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  3966. cb(Vcur, "Vcur", il);
  3967. // RoPE the first n_rot of q/k, pass the other half, and concat.
  3968. struct ggml_tensor * qrot = ggml_cont(ctx0, ggml_view_3d(
  3969. ctx0, tmpq, hparams.n_rot, n_head, n_tokens,
  3970. ggml_element_size(tmpq) * n_embd_head,
  3971. ggml_element_size(tmpq) * n_embd_head * n_head,
  3972. 0
  3973. ));
  3974. cb(qrot, "qrot", il);
  3975. struct ggml_tensor * krot = ggml_cont(ctx0, ggml_view_3d(
  3976. ctx0, tmpk, hparams.n_rot, n_head, n_tokens,
  3977. ggml_element_size(tmpk) * n_embd_head,
  3978. ggml_element_size(tmpk) * n_embd_head * n_head_kv,
  3979. 0
  3980. ));
  3981. cb(krot, "krot", il);
3982. // get the second half of tmpq, i.e. tmpq[n_rot:, :, :]
  3983. struct ggml_tensor * qpass = ggml_view_3d(
  3984. ctx0, tmpq, (n_embd_head - hparams.n_rot), n_head, n_tokens,
  3985. ggml_element_size(tmpq) * n_embd_head,
  3986. ggml_element_size(tmpq) * n_embd_head * n_head,
  3987. ggml_element_size(tmpq) * hparams.n_rot
  3988. );
  3989. cb(qpass, "qpass", il);
  3990. struct ggml_tensor * kpass = ggml_view_3d(
  3991. ctx0, tmpk, (n_embd_head - hparams.n_rot), n_head_kv, n_tokens,
  3992. ggml_element_size(tmpk) * (n_embd_head),
  3993. ggml_element_size(tmpk) * (n_embd_head) * n_head_kv,
  3994. ggml_element_size(tmpk) * hparams.n_rot
  3995. );
  3996. cb(kpass, "kpass", il);
  3997. struct ggml_tensor * qrotated = ggml_rope_custom(
  3998. ctx0, qrot, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  3999. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4000. );
  4001. cb(qrotated, "qrotated", il);
  4002. struct ggml_tensor * krotated = ggml_rope_custom(
  4003. ctx0, krot, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4004. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4005. );
  4006. cb(krotated, "krotated", il);
  4007. // ggml currently only supports concatenation on dim=2
  4008. // so we need to permute qrot, qpass, concat, then permute back.
  4009. qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3));
  4010. cb(qrotated, "qrotated", il);
  4011. krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3));
  4012. cb(krotated, "krotated", il);
  4013. qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3));
  4014. cb(qpass, "qpass", il);
  4015. kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3));
  4016. cb(kpass, "kpass", il);
  4017. struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass);
  4018. cb(Qcur, "Qcur", il);
  4019. struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
  4020. cb(Kcur, "Kcur", il);
  4021. struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3));
  4022. cb(Q, "Q", il);
  4023. Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
  4024. cb(Kcur, "Kcur", il);
  4025. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4026. cur = llm_build_kqv(ctx0, hparams, kv_self,
  4027. model.layers[il].wo, NULL,
  4028. Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
  4029. cb(cur, "kqv_out", il);
  4030. }
  4031. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4032. cb(ffn_inp, "ffn_inp", il);
  4033. // feed-forward network
  4034. {
  4035. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4036. model.layers[il].ffn_norm,
  4037. model.layers[il].ffn_norm_b,
  4038. LLM_NORM, cb, il);
  4039. cb(cur, "ffn_norm", il);
  4040. cur = llm_build_ffn(ctx0, cur,
  4041. model.layers[il].ffn_up, NULL,
  4042. model.layers[il].ffn_gate, NULL,
  4043. model.layers[il].ffn_down, NULL,
  4044. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4045. cb(cur, "ffn_out", il);
  4046. }
  4047. cur = ggml_add(ctx0, cur, ffn_inp);
  4048. cb(cur, "l_out", il);
  4049. // input for next layer
  4050. inpL = cur;
  4051. }
  4052. cur = inpL;
  4053. cur = llm_build_norm(ctx0, cur, hparams,
  4054. model.output_norm,
  4055. model.output_norm_b,
  4056. LLM_NORM, cb, -1);
  4057. cb(cur, "result_norm", -1);
  4058. // lm_head
  4059. cur = ggml_mul_mat(ctx0, model.output, cur);
  4060. cb(cur, "result_output", -1);
  4061. ggml_build_forward_expand(gf, cur);
  4062. return gf;
  4063. }
  4064. };
  4065. //
  4066. // tensor offloading helpers
  4067. //
  4068. // TODO: will be removed with backend v2
  4069. enum llm_offload_func_e {
  4070. OFFLOAD_FUNC_NOP,
  4071. OFFLOAD_FUNC,
  4072. OFFLOAD_FUNC_KQ,
  4073. OFFLOAD_FUNC_V,
  4074. OFFLOAD_FUNC_NR,
  4075. OFFLOAD_FUNC_EMB,
  4076. OFFLOAD_FUNC_OUT,
  4077. };
  4078. // TODO: will be removed with backend v2
  4079. struct llm_offload_trie {
  4080. struct node {
  4081. ~node() {
  4082. for (int i = 0; i < 256; ++i) {
  4083. if (children[i]) {
  4084. delete children[i];
  4085. }
  4086. }
  4087. }
  4088. node * children[256] = { nullptr };
  4089. llm_offload_func_e func = OFFLOAD_FUNC_NOP;
  4090. };
  4091. llm_offload_trie() {
  4092. root = new node;
  4093. }
  4094. llm_offload_trie(const std::unordered_map<const char *, llm_offload_func_e> & map) {
  4095. root = new node;
  4096. for (const auto & kv : map) {
  4097. add(kv.first, kv.second);
  4098. }
  4099. }
  4100. ~llm_offload_trie() {
  4101. delete root;
  4102. }
  4103. void add(const char * name, llm_offload_func_e func) {
  4104. node * cur = root;
  4105. for (int i = 0; ; ++i) {
  4106. const uint8_t c = name[i];
  4107. if (!c) {
  4108. break;
  4109. }
  4110. if (!cur->children[c]) {
  4111. cur->children[c] = new node;
  4112. }
  4113. cur = cur->children[c];
  4114. }
  4115. cur->func = func;
  4116. }
  4117. llm_offload_func_e find(const char * name) const {
  4118. const node * cur = root;
  4119. for (int i = 0; ; ++i) {
  4120. const uint8_t c = name[i];
  4121. if (!c) {
  4122. break;
  4123. }
  4124. if (!cur->children[c]) {
  4125. return OFFLOAD_FUNC_NOP;
  4126. }
  4127. cur = cur->children[c];
  4128. }
  4129. return cur->func;
  4130. }
  4131. node * root = nullptr;
  4132. };
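// Note: the trie maps tensor base names (the strings passed to the build callback, not the
// "%s-%d" formatted tensor names) to offload groups, one child node per byte. A minimal
// usage sketch:
//   llm_offload_trie trie(k_offload_map);
//   llm_offload_func_e f = trie.find("kq_soft_max");  // -> OFFLOAD_FUNC_V
//   f = trie.find("some_unknown_name");               // -> OFFLOAD_FUNC_NOP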
  4133. // TODO: will be removed with backend v2
  4134. static const std::unordered_map<const char *, llm_offload_func_e> k_offload_map = {
  4135. //{ "inp_tokens", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
  4136. //{ "inp_embd", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
  4137. { "pos_embd", OFFLOAD_FUNC_NR },
  4138. { "inp_pos", OFFLOAD_FUNC_KQ }, // this is often used for KQ ops (e.g. rope)
  4139. { "KQ_scale", OFFLOAD_FUNC_KQ },
  4140. { "KQ_mask", OFFLOAD_FUNC_KQ },
  4141. { "K_shift", OFFLOAD_FUNC_KQ },
  4142. { "K_shifted", OFFLOAD_FUNC_KQ },
  4143. { "inp_norm", OFFLOAD_FUNC_NR },
  4144. { "inp_norm_w", OFFLOAD_FUNC_NR },
  4145. { "inp_norm_wb", OFFLOAD_FUNC_NR },
  4146. { "norm", OFFLOAD_FUNC },
  4147. { "norm_w", OFFLOAD_FUNC },
  4148. { "norm_wb", OFFLOAD_FUNC },
  4149. { "attn_norm", OFFLOAD_FUNC },
  4150. { "attn_norm_2", OFFLOAD_FUNC },
  4151. { "wqkv", OFFLOAD_FUNC_KQ },
  4152. { "bqkv", OFFLOAD_FUNC_KQ },
  4153. { "wqkv_clamped", OFFLOAD_FUNC_KQ },
  4154. { "tmpk", OFFLOAD_FUNC_KQ },
  4155. { "tmpq", OFFLOAD_FUNC_KQ },
  4156. { "tmpv", OFFLOAD_FUNC_V },
  4157. { "Kcur", OFFLOAD_FUNC_KQ },
  4158. { "Qcur", OFFLOAD_FUNC_KQ },
  4159. { "Vcur", OFFLOAD_FUNC_V },
  4160. { "krot", OFFLOAD_FUNC_KQ },
  4161. { "qrot", OFFLOAD_FUNC_KQ },
  4162. { "kpass", OFFLOAD_FUNC_KQ },
  4163. { "qpass", OFFLOAD_FUNC_KQ },
  4164. { "krotated", OFFLOAD_FUNC_KQ },
  4165. { "qrotated", OFFLOAD_FUNC_KQ },
  4166. { "q", OFFLOAD_FUNC_KQ },
  4167. { "k", OFFLOAD_FUNC_KQ },
  4168. { "kq", OFFLOAD_FUNC_KQ },
  4169. { "kq_scaled", OFFLOAD_FUNC_KQ },
  4170. { "kq_scaled_alibi", OFFLOAD_FUNC_KQ },
  4171. { "kq_masked", OFFLOAD_FUNC_KQ },
  4172. { "kq_soft_max", OFFLOAD_FUNC_V },
  4173. { "v", OFFLOAD_FUNC_V },
  4174. { "kqv", OFFLOAD_FUNC_V },
  4175. { "kqv_merged", OFFLOAD_FUNC_V },
  4176. { "kqv_merged_cont", OFFLOAD_FUNC_V },
  4177. { "kqv_wo", OFFLOAD_FUNC_V },
  4178. { "kqv_out", OFFLOAD_FUNC_V },
  4179. { "ffn_inp", OFFLOAD_FUNC },
  4180. { "ffn_norm", OFFLOAD_FUNC },
  4181. { "ffn_up", OFFLOAD_FUNC },
  4182. { "ffn_up_b", OFFLOAD_FUNC },
  4183. { "ffn_gate", OFFLOAD_FUNC },
  4184. { "ffn_gate_b", OFFLOAD_FUNC },
  4185. { "ffn_gate_par", OFFLOAD_FUNC },
  4186. { "ffn_down", OFFLOAD_FUNC },
  4187. { "ffn_down_b", OFFLOAD_FUNC },
  4188. { "ffn_out", OFFLOAD_FUNC },
  4189. { "ffn_silu", OFFLOAD_FUNC },
  4190. { "ffn_gelu", OFFLOAD_FUNC },
  4191. { "ffn_relu", OFFLOAD_FUNC },
  4192. { "ffn_sqr(relu)", OFFLOAD_FUNC },
  4193. { "l_out", OFFLOAD_FUNC },
  4194. { "result_norm", OFFLOAD_FUNC_EMB },
  4195. { "result_output", OFFLOAD_FUNC_OUT },
  4196. };
  4197. static llm_offload_trie k_offload_func_trie(k_offload_map);
  4198. static struct ggml_cgraph * llama_build_graph(
  4199. llama_context & lctx,
  4200. const llama_batch & batch) {
  4201. const auto & model = lctx.model;
  4202. // check if we should build the worst-case graph (for memory measurement)
  4203. const bool worst_case = ggml_allocr_is_measure(lctx.alloc);
  4204. // keep track of the input that has already been allocated
  4205. bool alloc_inp_tokens = false;
  4206. bool alloc_inp_embd = false;
  4207. bool alloc_inp_pos = false;
  4208. bool alloc_inp_KQ_scale = false;
  4209. bool alloc_inp_KQ_mask = false;
  4210. bool alloc_inp_K_shift = false;
  4211. #ifdef GGML_USE_CUBLAS
  4212. const bool do_offload = true;
  4213. #else
  4214. const bool do_offload = true; // TODO: set to false after finishing refactoring
  4215. #endif
  4216. int n_non_view = 0; // number of non-view tensors that have been processed by the callback
  4217. // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
  4218. // TODO: will be removed with backend v2
  4219. llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
  4220. if (il >= 0) {
  4221. ggml_format_name(cur, "%s-%d", name, il);
  4222. } else {
  4223. ggml_set_name(cur, name);
  4224. }
  4225. //
  4226. // allocate input tensors and set input data
  4227. //
  4228. // TODO: will be removed with backend v2
  4229. if (!alloc_inp_tokens && strcmp(name, "inp_tokens") == 0) {
  4230. ggml_allocr_alloc(lctx.alloc, cur);
  4231. if (!ggml_allocr_is_measure(lctx.alloc) && batch.token) {
  4232. const int64_t n_tokens = cur->ne[0];
  4233. memcpy(cur->data, batch.token, n_tokens*ggml_element_size(cur));
  4234. }
  4235. alloc_inp_tokens = true;
  4236. }
  4237. if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0) {
  4238. ggml_allocr_alloc(lctx.alloc, cur);
  4239. if (!ggml_allocr_is_measure(lctx.alloc) && batch.embd) {
  4240. const int64_t n_embd = cur->ne[0];
  4241. const int64_t n_tokens = cur->ne[1];
  4242. memcpy(cur->data, batch.embd, n_tokens*n_embd*ggml_element_size(cur));
  4243. }
  4244. alloc_inp_embd = true;
  4245. }
  4246. if (!alloc_inp_pos && strcmp(name, "inp_pos") == 0) {
  4247. ggml_allocr_alloc(lctx.alloc, cur);
  4248. if (!ggml_allocr_is_measure(lctx.alloc) && batch.pos) {
  4249. const int64_t n_tokens = cur->ne[0];
  4250. int32_t * data = (int32_t *) cur->data;
  4251. for (int i = 0; i < n_tokens; ++i) {
  4252. data[i] = batch.pos[i];
  4253. }
  4254. }
  4255. alloc_inp_pos = true;
  4256. }
  4257. if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) {
  4258. ggml_allocr_alloc(lctx.alloc, cur);
  4259. if (!ggml_allocr_is_measure(lctx.alloc)) {
  4260. const int64_t n_embd_head = model.hparams.n_embd_head();
  4261. ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head)));
  4262. }
  4263. alloc_inp_KQ_scale = true;
  4264. }
  4265. if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) {
  4266. ggml_allocr_alloc(lctx.alloc, cur);
  4267. if (!ggml_allocr_is_measure(lctx.alloc)) {
  4268. const int64_t n_kv = cur->ne[0];
  4269. const int64_t n_tokens = cur->ne[1];
  4270. float * data = (float *) cur->data;
  4271. memset(data, 0, ggml_nbytes(cur));
  4272. for (int h = 0; h < 1; ++h) {
  4273. for (int j = 0; j < n_tokens; ++j) {
  4274. const llama_pos pos = batch.pos[j];
  4275. const llama_seq_id seq_id = batch.seq_id[j][0];
  4276. for (int i = 0; i < n_kv; ++i) {
  4277. if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
  4278. data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY;
  4279. }
  4280. }
  4281. }
  4282. }
  4283. }
  4284. alloc_inp_KQ_mask = true;
  4285. }
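// Note: the block above fills a causal/sequence mask over the KV cache: entry (j, i) stays
// 0 when cache cell i belongs to the sequence of token j and is at a position <= that
// token's position, and is set to -INFINITY otherwise. The mask is added to KQ before the
// soft_max in llm_build_kqv().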
  4286. if (!alloc_inp_K_shift && strcmp(name, "K_shift") == 0) {
  4287. ggml_allocr_alloc(lctx.alloc, cur);
  4288. if (!ggml_allocr_is_measure(lctx.alloc)) {
  4289. const int64_t n_ctx = cur->ne[0];
  4290. int32_t * data = (int32_t *) cur->data;
  4291. for (int i = 0; i < n_ctx; ++i) {
  4292. data[i] = lctx.kv_self.cells[i].delta;
  4293. }
  4294. }
  4295. alloc_inp_K_shift = true;
  4296. }
  4297. // view tensors are not processed further
  4298. if (cur->view_src != nullptr) {
  4299. return;
  4300. }
  4301. if (cur->op != GGML_OP_NONE) {
  4302. n_non_view++;
  4303. }
  4304. //
  4305. // offload layers
  4306. //
  4307. // TODO: will be removed with backend v2
  4308. //#define LLAMA_OFFLOAD_DEBUG
  4309. if (!do_offload) {
  4310. return;
  4311. }
  4312. const int n_layer = model.hparams.n_layer;
  4313. const int n_gpu_layers = model.n_gpu_layers;
  4314. const int i_gpu_start = n_layer - n_gpu_layers;
  4315. // should we offload the final norm? yes if we are not computing embeddings
  4316. const bool offload_emb = lctx.embedding.empty();
  4317. static const std::unordered_map<llm_offload_func_e, std::string, std::hash<int>> k_offload_func_name = {
  4318. { OFFLOAD_FUNC_NOP, "CPU" },
  4319. { OFFLOAD_FUNC_OUT, "CPU" },
  4320. #ifdef GGML_USE_CUBLAS
  4321. { OFFLOAD_FUNC, "GPU (CUDA)" },
  4322. { OFFLOAD_FUNC_KQ, "GPU (CUDA) KQ" },
  4323. { OFFLOAD_FUNC_V, "GPU (CUDA) V" },
  4324. { OFFLOAD_FUNC_NR, "GPU (CUDA) NR" },
  4325. { OFFLOAD_FUNC_EMB, "GPU (CUDA) EMB" },
  4326. #else
  4327. { OFFLOAD_FUNC, "CPU" },
  4328. { OFFLOAD_FUNC_KQ, "CPU" },
  4329. { OFFLOAD_FUNC_V, "CPU" },
  4330. { OFFLOAD_FUNC_NR, "CPU" },
  4331. { OFFLOAD_FUNC_EMB, "CPU" },
  4332. #endif // GGML_USE_CUBLAS
  4333. };
  4334. // check the global map for what offload function to use for this tensor
  4335. llm_offload_func_e func_e = k_offload_func_trie.find(name);
  4336. if (func_e == OFFLOAD_FUNC_NOP) {
  4337. #ifdef LLAMA_OFFLOAD_DEBUG
  4338. // if a tensor hasn't been offloaded, we warn the user
  4339. if (worst_case) {
  4340. LLAMA_LOG_WARN("%s: %32s: not offloaded (ref: %s)\n", __func__,
  4341. cur->name, "https://github.com/ggerganov/llama.cpp/pull/3837");
  4342. }
  4343. #endif
  4344. return;
  4345. }
4346. // decide whether this tensor is actually offloaded, respecting the provided n_gpu_layers
  4347. switch (func_e) {
  4348. case OFFLOAD_FUNC_NOP:
  4349. case OFFLOAD_FUNC_OUT:
  4350. break;
  4351. case OFFLOAD_FUNC:
  4352. if (n_gpu_layers < n_layer) {
  4353. if (il < i_gpu_start) {
  4354. func_e = OFFLOAD_FUNC_NOP;
  4355. }
  4356. }
  4357. break;
  4358. case OFFLOAD_FUNC_NR:
  4359. if (n_gpu_layers <= n_layer + 0) {
  4360. func_e = OFFLOAD_FUNC_NOP;
  4361. }
  4362. break;
  4363. case OFFLOAD_FUNC_V:
  4364. if (n_gpu_layers <= n_layer + 1) {
  4365. func_e = OFFLOAD_FUNC_NOP;
  4366. }
  4367. break;
  4368. case OFFLOAD_FUNC_KQ:
  4369. if (n_gpu_layers <= n_layer + 2) {
  4370. func_e = OFFLOAD_FUNC_NOP;
  4371. }
  4372. break;
  4373. case OFFLOAD_FUNC_EMB:
  4374. if (!offload_emb || n_gpu_layers < n_layer) {
  4375. func_e = OFFLOAD_FUNC_NOP;
  4376. }
  4377. break;
  4378. default: GGML_ASSERT(false);
  4379. }
  4380. offload_func_t func = ggml_offload_nop;
4381. // default to a no-op so that backends without offload support (e.g. Metal) are unaffected
  4382. #ifdef GGML_USE_CUBLAS
  4383. static offload_func_t ggml_offload_gpu = ggml_cuda_assign_buffers_no_alloc;
  4384. #else
  4385. static offload_func_t ggml_offload_gpu = ggml_offload_nop;
  4386. #endif
  4387. switch (func_e) {
  4388. case OFFLOAD_FUNC_NOP:
  4389. case OFFLOAD_FUNC_OUT: func = ggml_offload_nop; break;
  4390. case OFFLOAD_FUNC:
  4391. case OFFLOAD_FUNC_KQ:
  4392. case OFFLOAD_FUNC_V:
  4393. case OFFLOAD_FUNC_NR:
  4394. case OFFLOAD_FUNC_EMB: func = ggml_offload_gpu; break;
  4395. default: GGML_ASSERT(false);
  4396. }
  4397. // apply offload function to the tensor
  4398. func(cur);
  4399. #ifdef LLAMA_OFFLOAD_DEBUG
  4400. if (worst_case) {
  4401. LLAMA_LOG_INFO("%s: %32s: %s\n", __func__, cur->name, k_offload_func_name.at(func_e).c_str());
  4402. }
  4403. #endif
  4404. };
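// The callback above has three jobs: (1) give every tensor a stable debug name, (2) allocate
// and fill the graph input tensors the first time they are seen, and (3) pick an offload
// function for non-view tensors based on k_offload_func_trie and n_gpu_layers.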
  4405. struct ggml_cgraph * result = NULL;
  4406. struct llm_build_context llm(lctx, batch, cb, worst_case);
  4407. llm.init();
  4408. switch (model.arch) {
  4409. case LLM_ARCH_LLAMA:
  4410. {
  4411. result = llm.build_llama();
  4412. } break;
  4413. case LLM_ARCH_BAICHUAN:
  4414. {
  4415. result = llm.build_baichuan();
  4416. } break;
  4417. case LLM_ARCH_FALCON:
  4418. {
  4419. result = llm.build_falcon();
  4420. } break;
  4421. case LLM_ARCH_STARCODER:
  4422. {
  4423. result = llm.build_starcoder();
  4424. } break;
  4425. case LLM_ARCH_PERSIMMON:
  4426. {
  4427. result = llm.build_persimmon();
  4428. } break;
  4429. case LLM_ARCH_REFACT:
  4430. {
  4431. result = llm.build_refact();
  4432. } break;
  4433. case LLM_ARCH_BLOOM:
  4434. {
  4435. result = llm.build_bloom();
  4436. } break;
  4437. case LLM_ARCH_MPT:
  4438. {
  4439. result = llm.build_mpt();
  4440. } break;
  4441. case LLM_ARCH_STABLELM:
  4442. {
  4443. result = llm.build_stablelm();
  4444. } break;
  4445. default:
  4446. GGML_ASSERT(false);
  4447. }
  4448. llm.free();
  4449. if (worst_case) {
  4450. int n_non_view_total = 0;
  4451. for (int i = 0; i < result->n_nodes; ++i) {
  4452. if (result->nodes[i]->view_src == nullptr) {
  4453. n_non_view_total++;
  4454. }
  4455. }
  4456. LLAMA_LOG_INFO("%s: non-view tensors processed: %d/%d\n", __func__, n_non_view, n_non_view_total);
  4457. if (n_non_view != n_non_view_total) {
  4458. LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__);
  4459. LLAMA_LOG_WARN("%s: not all non-view tensors have been processed with a callback\n", __func__);
  4460. LLAMA_LOG_WARN("%s: this can indicate an inefficiency in the graph implementation\n", __func__);
  4461. LLAMA_LOG_WARN("%s: build with LLAMA_OFFLOAD_DEBUG for more info\n", __func__);
  4462. LLAMA_LOG_WARN("%s: ref: https://github.com/ggerganov/llama.cpp/pull/3837\n", __func__);
  4463. LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__);
  4464. }
  4465. }
  4466. return result;
  4467. }
  4468. // decode a batch of tokens by evaluating the transformer
  4469. //
  4470. // - lctx: llama context
  4471. // - batch: batch to evaluate
  4472. //
  4473. // return 0 on success
  4474. // return positive int on warning
  4475. // return negative int on error
  4476. //
  4477. static int llama_decode_internal(
  4478. llama_context & lctx,
  4479. llama_batch batch) {
  4480. const uint32_t n_tokens = batch.n_tokens;
  4481. if (n_tokens == 0) {
4482. LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
  4483. return -1;
  4484. }
  4485. const auto & model = lctx.model;
  4486. const auto & hparams = model.hparams;
  4487. const auto & cparams = lctx.cparams;
  4488. const auto n_batch = cparams.n_batch;
  4489. GGML_ASSERT(n_tokens <= n_batch);
  4490. int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
  4491. GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
  4492. const int64_t t_start_us = ggml_time_us();
  4493. #ifdef GGML_USE_MPI
  4494. // TODO: needs fix after #3228
  4495. GGML_ASSERT(false && "not implemented");
  4496. //ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
  4497. #endif
  4498. GGML_ASSERT(n_threads > 0);
  4499. auto & kv_self = lctx.kv_self;
  4500. GGML_ASSERT(!!kv_self.ctx);
  4501. const int64_t n_embd = hparams.n_embd;
  4502. const int64_t n_vocab = hparams.n_vocab;
4503. // helpers for a smoother batch API transition
  4504. // after deprecating the llama_eval calls, these will be removed
  4505. std::vector<llama_pos> pos;
  4506. std::vector<int32_t> n_seq_id;
  4507. std::vector<llama_seq_id *> seq_id_arr;
  4508. std::vector<std::vector<llama_seq_id>> seq_id;
  4509. if (batch.pos == nullptr) {
  4510. pos.resize(n_tokens);
  4511. for (uint32_t i = 0; i < n_tokens; i++) {
  4512. pos[i] = batch.all_pos_0 + i*batch.all_pos_1;
  4513. }
  4514. batch.pos = pos.data();
  4515. }
  4516. if (batch.seq_id == nullptr) {
  4517. n_seq_id.resize(n_tokens);
  4518. seq_id.resize(n_tokens);
  4519. seq_id_arr.resize(n_tokens);
  4520. for (uint32_t i = 0; i < n_tokens; i++) {
  4521. n_seq_id[i] = 1;
  4522. seq_id[i].resize(1);
  4523. seq_id[i][0] = batch.all_seq_id;
  4524. seq_id_arr[i] = seq_id[i].data();
  4525. }
  4526. batch.n_seq_id = n_seq_id.data();
  4527. batch.seq_id = seq_id_arr.data();
  4528. }
  4529. if (!llama_kv_cache_find_slot(kv_self, batch)) {
  4530. return 1;
  4531. }
  4532. // a heuristic, to avoid attending the full cache if it is not yet utilized
  4533. // after enough generations, the benefit from this heuristic disappears
4534. // if we start defragmenting the cache, this heuristic will become more important
  4535. //kv_self.n = std::max(32, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)); // TODO: this might be better for CUDA?
  4536. kv_self.n = std::min((int32_t) cparams.n_ctx, std::max(32, llama_kv_cache_cell_max(kv_self)));
  4537. //printf("kv_self.n = %d\n", kv_self.n);
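// illustrative values for the clamp above, assuming cparams.n_ctx = 512:
//   llama_kv_cache_cell_max(kv_self) =   5  ->  kv_self.n =  32  (lower bound)
//   llama_kv_cache_cell_max(kv_self) = 300  ->  kv_self.n = 300
//   llama_kv_cache_cell_max(kv_self) = 900  ->  kv_self.n = 512  (clamped to n_ctx)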
  4538. ggml_allocr_reset(lctx.alloc);
  4539. ggml_cgraph * gf = llama_build_graph(lctx, batch);
  4540. ggml_allocr_alloc_graph(lctx.alloc, gf);
  4541. struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
  4542. struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
  4543. GGML_ASSERT(strcmp(res->name, "result_output") == 0);
  4544. GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
  4545. #ifdef GGML_USE_CUBLAS
  4546. for (int i = 0; i < gf->n_leafs; i++) {
  4547. ggml_tensor * node = gf->leafs[i];
  4548. if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
  4549. ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data);
  4550. ggml_cuda_copy_to_device(node);
  4551. }
  4552. }
  4553. for (int i = 0; i < gf->n_nodes; i++) {
  4554. ggml_tensor * node = gf->nodes[i];
  4555. if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
  4556. ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data);
  4557. }
  4558. }
  4559. // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
  4560. if (!lctx.embedding.empty()) {
  4561. embeddings->backend = GGML_BACKEND_CPU;
  4562. }
  4563. res->backend = GGML_BACKEND_CPU;
  4564. #endif
  4565. // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
  4566. // for big prompts, if BLAS is enabled, it is better to use only one thread
  4567. // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
  4568. // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
4569. // we still need some threads to process all non-mul_mat ops, but not too many, to avoid interfering
4570. // with the BLAS calls. need a better solution
  4571. if (n_tokens >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
  4572. n_threads = std::min(4, n_threads);
  4573. }
  4574. // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
  4575. const bool full_offload_supported =
  4576. model.arch == LLM_ARCH_LLAMA ||
  4577. model.arch == LLM_ARCH_BAICHUAN ||
  4578. model.arch == LLM_ARCH_FALCON ||
  4579. model.arch == LLM_ARCH_REFACT ||
  4580. model.arch == LLM_ARCH_MPT ||
  4581. model.arch == LLM_ARCH_STARCODER ||
  4582. model.arch == LLM_ARCH_STABLELM;
  4583. const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
  4584. if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
  4585. n_threads = 1;
  4586. }
  4587. #if GGML_USE_MPI
  4588. const int64_t n_layer = hparams.n_layer;
  4589. ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
  4590. #endif
  4591. #ifdef GGML_USE_METAL
  4592. if (lctx.ctx_metal) {
  4593. ggml_metal_set_n_cb (lctx.ctx_metal, n_threads);
  4594. ggml_metal_graph_compute(lctx.ctx_metal, gf);
  4595. } else {
  4596. ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
  4597. }
  4598. #else
  4599. ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
  4600. #endif
  4601. #if GGML_USE_MPI
  4602. ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
  4603. #endif
  4604. // update the kv ring buffer
  4605. {
  4606. if (kv_self.has_shift) {
  4607. kv_self.has_shift = false;
  4608. for (uint32_t i = 0; i < kv_self.size; ++i) {
  4609. kv_self.cells[i].delta = 0;
  4610. }
  4611. }
  4612. kv_self.head += n_tokens;
  4613. // Ensure kv cache head points to a valid index.
  4614. if (kv_self.head >= kv_self.size) {
  4615. kv_self.head = 0;
  4616. }
  4617. }
  4618. #ifdef GGML_PERF
  4619. // print timing information per ggml operation (for debugging purposes)
  4620. // requires GGML_PERF to be defined
  4621. ggml_graph_print(gf);
  4622. #endif
  4623. // plot the computation graph in dot format (for debugging purposes)
  4624. //if (n_past%100 == 0) {
  4625. // ggml_graph_dump_dot(gf, NULL, "llama.dot");
  4626. //}
  4627. // extract logits
  4628. // TODO: do not compute and extract logits if only embeddings are needed
  4629. // need to update the graphs to skip "result_output"
  4630. {
  4631. auto & logits_out = lctx.logits;
  4632. if (batch.logits) {
  4633. logits_out.resize(n_vocab * n_tokens);
  4634. for (uint32_t i = 0; i < n_tokens; i++) {
  4635. if (batch.logits[i] == 0) {
  4636. continue;
  4637. }
  4638. memcpy(logits_out.data() + (n_vocab*i), (float *) ggml_get_data(res) + (n_vocab*i), sizeof(float)*n_vocab);
  4639. }
  4640. } else if (lctx.logits_all) {
  4641. logits_out.resize(n_vocab * n_tokens);
  4642. memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*n_tokens);
  4643. } else {
  4644. logits_out.resize(n_vocab);
  4645. memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(n_tokens - 1)), sizeof(float)*n_vocab);
  4646. }
  4647. }
  4648. // extract embeddings
  4649. if (!lctx.embedding.empty()) {
  4650. auto & embedding_out = lctx.embedding;
  4651. embedding_out.resize(n_embd);
  4652. memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(n_tokens - 1)), sizeof(float)*n_embd);
  4653. }
  4654. // measure the performance only for the single-token evals
  4655. if (n_tokens == 1) {
  4656. lctx.t_eval_us += ggml_time_us() - t_start_us;
  4657. lctx.n_eval++;
  4658. }
  4659. else if (n_tokens > 1) {
  4660. lctx.t_p_eval_us += ggml_time_us() - t_start_us;
  4661. lctx.n_p_eval += n_tokens;
  4662. }
  4663. // get a more accurate load time, upon first eval
  4664. // TODO: fix this
  4665. if (!lctx.has_evaluated_once) {
  4666. lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
  4667. lctx.has_evaluated_once = true;
  4668. }
  4669. return 0;
  4670. }
  4671. //
  4672. // tokenizer
  4673. //
  4674. static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
  4675. return vocab.type;
  4676. }
  4677. static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
  4678. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL;
  4679. }
  4680. static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
  4681. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNKNOWN;
  4682. }
  4683. static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
  4684. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
  4685. }
  4686. static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
  4687. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
  4688. }
  4689. static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id) {
  4690. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
  4691. }
  4692. static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
  4693. GGML_ASSERT(llama_is_byte_token(vocab, id));
  4694. const auto& token_data = vocab.id_to_token.at(id);
  4695. switch (llama_vocab_get_type(vocab)) {
  4696. case LLAMA_VOCAB_TYPE_SPM: {
  4697. auto buf = token_data.text.substr(3, 2);
  4698. return strtol(buf.c_str(), NULL, 16);
  4699. }
  4700. case LLAMA_VOCAB_TYPE_BPE: {
  4701. GGML_ASSERT(false);
  4702. return unicode_to_bytes_bpe(token_data.text);
  4703. }
  4704. default:
  4705. GGML_ASSERT(false);
  4706. }
  4707. }
  4708. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
  4709. static const char * hex = "0123456789ABCDEF";
  4710. switch (llama_vocab_get_type(vocab)) {
  4711. case LLAMA_VOCAB_TYPE_SPM: {
  4712. const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
  4713. return vocab.token_to_id.at(buf);
  4714. }
  4715. case LLAMA_VOCAB_TYPE_BPE: {
  4716. return vocab.token_to_id.at(bytes_to_unicode_bpe(ch));
  4717. }
  4718. default:
  4719. GGML_ASSERT(false);
  4720. }
  4721. }
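// illustrative round trip for the two helpers above, assuming an SPM vocab with the
// usual "<0xNN>" byte tokens:
//   llama_byte_to_token(vocab, 0x41) looks up the token whose text is "<0x41>"
//   llama_token_to_byte(vocab, id)   parses text.substr(3, 2) == "41" back to 0x41 ('A')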
  4722. static void llama_escape_whitespace(std::string & text) {
  4723. replace_all(text, " ", "\xe2\x96\x81");
  4724. }
  4725. static void llama_unescape_whitespace(std::string & word) {
  4726. replace_all(word, "\xe2\x96\x81", " ");
  4727. }
  4728. struct llm_symbol {
  4729. using index = int;
  4730. index prev;
  4731. index next;
  4732. const char * text;
  4733. size_t n;
  4734. };
  4735. static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
  4736. // SPM tokenizer
  4737. // original implementation:
  4738. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  4739. struct llm_bigram_spm {
  4740. struct comparator {
  4741. bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
  4742. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  4743. }
  4744. };
  4745. using queue_storage = std::vector<llm_bigram_spm>;
  4746. using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
  4747. llm_symbol::index left;
  4748. llm_symbol::index right;
  4749. float score;
  4750. size_t size;
  4751. };
  4752. struct llm_tokenizer_spm {
  4753. llm_tokenizer_spm(const llama_vocab & vocab): vocab(vocab) {}
  4754. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  4755. // split string into utf8 chars
  4756. int index = 0;
  4757. size_t offs = 0;
  4758. while (offs < text.size()) {
  4759. llm_symbol sym;
  4760. size_t len = utf8_len(text[offs]);
  4761. sym.text = text.c_str() + offs;
  4762. sym.n = std::min(len, text.size() - offs);
  4763. offs += sym.n;
  4764. sym.prev = index - 1;
  4765. sym.next = offs == text.size() ? -1 : index + 1;
  4766. index++;
  4767. symbols.emplace_back(sym);
  4768. }
  4769. // seed the work queue with all possible 2-character tokens.
  4770. for (size_t i = 1; i < symbols.size(); ++i) {
  4771. try_add_bigram(i - 1, i);
  4772. }
  4773. // keep substituting the highest frequency pairs for as long as we can.
  4774. while (!work_queue.empty()) {
  4775. auto bigram = work_queue.top();
  4776. work_queue.pop();
  4777. auto & left_sym = symbols[bigram.left];
  4778. auto & right_sym = symbols[bigram.right];
  4779. // if one of the symbols already got merged, skip it.
  4780. if (left_sym.n == 0 || right_sym.n == 0 ||
  4781. left_sym.n + right_sym.n != bigram.size) {
  4782. continue;
  4783. }
  4784. // merge the right sym into the left one
  4785. left_sym.n += right_sym.n;
  4786. right_sym.n = 0;
  4787. //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  4788. // remove the right sym from the chain
  4789. left_sym.next = right_sym.next;
  4790. if (right_sym.next >= 0) {
  4791. symbols[right_sym.next].prev = bigram.left;
  4792. }
  4793. // find more substitutions
  4794. try_add_bigram(left_sym.prev, bigram.left);
  4795. try_add_bigram(bigram.left, left_sym.next);
  4796. }
  4797. for (int i = 0; i != -1; i = symbols[i].next) {
  4798. auto & symbol = symbols[i];
  4799. resegment(symbol, output);
  4800. }
  4801. }
  4802. private:
  4803. void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
  4804. auto text = std::string(symbol.text, symbol.n);
  4805. auto token = vocab.token_to_id.find(text);
  4806. // Do we need to support is_unused?
  4807. if (token != vocab.token_to_id.end()) {
  4808. output.push_back((*token).second);
  4809. return;
  4810. }
  4811. const auto p = rev_merge.find(text);
  4812. if (p == rev_merge.end()) {
  4813. // output any symbols that did not form tokens as bytes.
  4814. for (int j = 0; j < (int)symbol.n; ++j) {
  4815. llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
  4816. output.push_back(token_id);
  4817. }
  4818. return;
  4819. }
  4820. resegment(symbols[p->second.first], output);
  4821. resegment(symbols[p->second.second], output);
  4822. }
  4823. void try_add_bigram(int left, int right) {
  4824. if (left == -1 || right == -1) {
  4825. return;
  4826. }
  4827. const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
  4828. auto token = vocab.token_to_id.find(text);
  4829. if (token == vocab.token_to_id.end()) {
  4830. return;
  4831. }
  4832. if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
  4833. return;
  4834. }
  4835. const auto & tok_data = vocab.id_to_token[(*token).second];
  4836. llm_bigram_spm bigram;
  4837. bigram.left = left;
  4838. bigram.right = right;
  4839. bigram.score = tok_data.score;
  4840. bigram.size = text.size();
  4841. work_queue.push(bigram);
  4842. // Do we need to support is_unused?
  4843. rev_merge[text] = std::make_pair(left, right);
  4844. }
  4845. const llama_vocab & vocab;
  4846. std::vector<llm_symbol> symbols;
  4847. llm_bigram_spm::queue work_queue;
  4848. std::map<std::string, std::pair<int, int>> rev_merge;
  4849. };
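// illustrative SPM merge for the word "hello" (hypothetical vocab contents and scores):
//   start : h | e | l | l | o               (one symbol per utf8 char)
//   merge : (l, l)  -> "ll"  if "ll" is in the vocab:         h | e | ll | o
//   merge : (e, ll) -> "ell" if it has the next best score:   h | ell | o
//   output: the surviving symbols are looked up by resegment(); anything that never
//           formed a vocab token falls back to byte tokens via llama_byte_to_token()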
  4850. // BPE tokenizer
  4851. // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
  4852. // tried to simplify unicode stuff, so most likely does not work 100% correctly!
4853. // TODO: there are a lot of common parts between the spm and bpe tokenizers; they should be refactored and reused
  4854. struct llm_bigram_bpe {
  4855. struct comparator {
  4856. bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
  4857. return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
  4858. }
  4859. };
  4860. using queue_storage = std::vector<llm_bigram_bpe>;
  4861. using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
  4862. llm_symbol::index left;
  4863. llm_symbol::index right;
  4864. std::string text;
  4865. int rank;
  4866. size_t size;
  4867. };
  4868. struct llm_tokenizer_bpe {
  4869. llm_tokenizer_bpe(const llama_vocab & vocab): vocab(vocab) {}
  4870. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  4871. int final_prev_index = -1;
  4872. auto word_collection = bpe_gpt2_preprocess(text);
  4873. symbols_final.clear();
  4874. for (auto & word : word_collection) {
  4875. work_queue = llm_bigram_bpe::queue();
  4876. symbols.clear();
  4877. int index = 0;
  4878. size_t offset = 0;
  4879. while (offset < word.size()) {
  4880. llm_symbol sym;
  4881. size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
  4882. sym.text = word.c_str() + offset;
  4883. sym.n = char_len;
  4884. offset += sym.n;
  4885. sym.prev = index - 1;
  4886. sym.next = offset == word.size() ? -1 : index + 1;
  4887. index++;
  4888. symbols.emplace_back(sym);
  4889. }
  4890. for (size_t i = 1; i < symbols.size(); ++i) {
  4891. add_new_bigram(i - 1, i);
  4892. }
  4893. // build token(s)
  4894. while (!work_queue.empty()) {
  4895. auto bigram = work_queue.top();
  4896. work_queue.pop();
  4897. auto & left_symbol = symbols[bigram.left];
  4898. auto & right_symbol = symbols[bigram.right];
  4899. if (left_symbol.n == 0 || right_symbol.n == 0) {
  4900. continue;
  4901. }
  4902. std::string left_token = std::string(left_symbol.text, left_symbol.n);
  4903. std::string right_token = std::string(right_symbol.text, right_symbol.n);
  4904. if (left_token + right_token != bigram.text) {
  4905. continue; // Skip this bigram if it's outdated
  4906. }
  4907. // merge the right sym into the left one
  4908. left_symbol.n += right_symbol.n;
  4909. right_symbol.n = 0;
  4910. // remove the right sym from the chain
  4911. left_symbol.next = right_symbol.next;
  4912. if (right_symbol.next >= 0) {
  4913. symbols[right_symbol.next].prev = bigram.left;
  4914. }
  4915. add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
  4916. add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
  4917. }
4918. // add the finished tokens to the final list, keeping the correct order for next and prev
  4919. for (auto & sym : symbols) {
  4920. if (sym.n > 0) {
  4921. sym.prev = final_prev_index;
  4922. sym.next = -1;
  4923. if (final_prev_index != -1) {
  4924. symbols_final[final_prev_index].next = symbols_final.size();
  4925. }
  4926. symbols_final.emplace_back(sym);
  4927. final_prev_index = symbols_final.size() - 1;
  4928. }
  4929. }
  4930. }
  4931. symbols = symbols_final;
  4932. if (!symbols.empty()) {
  4933. for (int i = 0; i != -1; i = symbols[i].next) {
  4934. auto & symbol = symbols[i];
  4935. if (symbol.n == 0) {
  4936. continue;
  4937. }
  4938. const std::string str = std::string(symbol.text, symbol.n);
  4939. const auto token = vocab.token_to_id.find(str);
  4940. if (token == vocab.token_to_id.end()) {
  4941. for (auto j = str.begin(); j != str.end(); ++j) {
  4942. std::string byte_str(1, *j);
  4943. auto token_multibyte = vocab.token_to_id.find(byte_str);
  4944. if (token_multibyte == vocab.token_to_id.end()) {
  4945. throw std::runtime_error("ERROR: byte not found in vocab");
  4946. }
  4947. output.push_back((*token_multibyte).second);
  4948. }
  4949. } else {
  4950. output.push_back((*token).second);
  4951. }
  4952. }
  4953. }
  4954. }
  4955. private:
  4956. void add_new_bigram(int left, int right) {
  4957. if (left == -1 || right == -1) {
  4958. return;
  4959. }
  4960. std::string left_token = std::string(symbols[left].text, symbols[left].n);
  4961. std::string right_token = std::string(symbols[right].text, symbols[right].n);
  4962. int rank_found = -1;
  4963. rank_found = vocab.find_bpe_rank(left_token, right_token);
  4964. if (rank_found < 0) {
  4965. return;
  4966. }
  4967. llm_bigram_bpe bigram;
  4968. bigram.left = left;
  4969. bigram.right = right;
  4970. bigram.text = left_token + right_token;
  4971. bigram.size = left_token.size() + right_token.size();
  4972. bigram.rank = rank_found;
  4973. work_queue.push(bigram);
  4974. }
  4975. std::vector<std::string> bpe_gpt2_preprocess(const std::string & text) {
  4976. std::vector<std::string> bpe_words;
  4977. std::vector<std::string> bpe_encoded_words;
  4978. std::string token = "";
  4979. // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+
  4980. bool collecting_numeric = false;
  4981. bool collecting_letter = false;
  4982. bool collecting_special = false;
  4983. bool collecting_whitespace_lookahead = false;
  4984. bool collecting = false;
  4985. std::vector<std::string> text_utf;
  4986. text_utf.reserve(text.size());
  4987. bpe_words.reserve(text.size());
  4988. bpe_encoded_words.reserve(text.size());
  4989. auto cps = codepoints_from_utf8(text);
  4990. for (size_t i = 0; i < cps.size(); ++i)
  4991. text_utf.emplace_back(codepoint_to_utf8(cps[i]));
  4992. for (int i = 0; i < (int)text_utf.size(); i++) {
  4993. const std::string & utf_char = text_utf[i];
  4994. bool split_condition = false;
  4995. int bytes_remain = text_utf.size() - i;
  4996. // forward backward lookups
  4997. const std::string & utf_char_next = (i + 1 < (int)text_utf.size()) ? text_utf[i + 1] : "";
  4998. const std::string & utf_char_next_next = (i + 2 < (int)text_utf.size()) ? text_utf[i + 2] : "";
  4999. // handling contractions
  5000. if (!split_condition && bytes_remain >= 2) {
  5001. // 's|'t|'m|'d
  5002. if (utf_char == "\'" && (utf_char_next == "s" || utf_char_next == "t" || utf_char_next == "m" || utf_char_next == "d")) {
  5003. split_condition = true;
  5004. }
  5005. if (split_condition) {
  5006. if (token.size()) {
  5007. bpe_words.emplace_back(token); // push previous content as token
  5008. }
  5009. token = utf_char + utf_char_next;
  5010. bpe_words.emplace_back(token);
  5011. token = "";
  5012. i++;
  5013. continue;
  5014. }
  5015. }
  5016. if (!split_condition && bytes_remain >= 3) {
  5017. // 're|'ve|'ll
  5018. if (utf_char == "\'" && (
  5019. (utf_char_next == "r" && utf_char_next_next == "e") ||
  5020. (utf_char_next == "v" && utf_char_next_next == "e") ||
  5021. (utf_char_next == "l" && utf_char_next_next == "l"))
  5022. ) {
  5023. split_condition = true;
  5024. }
  5025. if (split_condition) {
  5026. // current token + next token can be defined
  5027. if (token.size()) {
  5028. bpe_words.emplace_back(token); // push previous content as token
  5029. }
  5030. token = utf_char + utf_char_next + utf_char_next_next;
  5031. bpe_words.emplace_back(token); // the contraction
  5032. token = "";
  5033. i += 2;
  5034. continue;
  5035. }
  5036. }
  5037. if (!split_condition && !collecting) {
  5038. if (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER)) {
  5039. collecting_letter = true;
  5040. collecting = true;
  5041. }
  5042. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  5043. collecting_numeric = true;
  5044. collecting = true;
  5045. }
  5046. else if (
  5047. ((codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) && (codepoint_type(utf_char) != CODEPOINT_TYPE_WHITESPACE)) ||
  5048. (!token.size() && utf_char == " " && codepoint_type(utf_char_next) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char_next) != CODEPOINT_TYPE_DIGIT && codepoint_type(utf_char_next) != CODEPOINT_TYPE_WHITESPACE)
  5049. ) {
  5050. collecting_special = true;
  5051. collecting = true;
  5052. }
  5053. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE && codepoint_type(utf_char_next) == CODEPOINT_TYPE_WHITESPACE) {
  5054. collecting_whitespace_lookahead = true;
  5055. collecting = true;
  5056. }
  5057. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE) {
  5058. split_condition = true;
  5059. }
  5060. }
  5061. else if (!split_condition && collecting) {
  5062. if (collecting_letter && codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER) {
  5063. split_condition = true;
  5064. }
  5065. else if (collecting_numeric && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) {
  5066. split_condition = true;
  5067. }
  5068. else if (collecting_special && (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE)) {
  5069. split_condition = true;
  5070. }
  5071. else if (collecting_whitespace_lookahead && (codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  5072. split_condition = true;
  5073. }
  5074. }
  5075. if (utf_char_next == "") {
  5076. split_condition = true; // final
  5077. token += utf_char;
  5078. }
  5079. if (split_condition) {
  5080. if (token.size()) {
  5081. bpe_words.emplace_back(token);
  5082. }
  5083. token = utf_char;
  5084. collecting = false;
  5085. collecting_letter = false;
  5086. collecting_numeric = false;
  5087. collecting_special = false;
  5088. collecting_whitespace_lookahead = false;
  5089. }
  5090. else {
  5091. token += utf_char;
  5092. }
  5093. }
  5094. for (std::string & word : bpe_words) {
  5095. std::string encoded_token = "";
  5096. for (char & c : word) {
  5097. encoded_token += bytes_to_unicode_bpe(c);
  5098. }
  5099. bpe_encoded_words.emplace_back(encoded_token);
  5100. }
  5101. return bpe_encoded_words;
  5102. }
  5103. const llama_vocab & vocab;
  5104. std::vector<llm_symbol> symbols;
  5105. std::vector<llm_symbol> symbols_final;
  5106. llm_bigram_bpe::queue work_queue;
  5107. };
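// illustrative behaviour of bpe_gpt2_preprocess() above (approximate, ASCII input):
// the string "I've 2 cats" is split into the words
//   "I"  "'ve"  " 2"  " cats"
// and each word is then byte-encoded with bytes_to_unicode_bpe() before the merges run.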
  5108. typedef enum FRAGMENT_BUFFER_VARIANT_TYPE{
  5109. FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
  5110. FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
  5111. } FRAGMENT_BUFFER_VARIANT_TYPE;
  5112. struct fragment_buffer_variant{
  5113. fragment_buffer_variant(llama_vocab::id _token)
  5114. :
  5115. type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
  5116. token(_token),
  5117. raw_text(_dummy),
  5118. offset(0),
  5119. length(0){}
  5120. fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
  5121. :
  5122. type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
  5123. token((llama_vocab::id)-1),
  5124. raw_text(_raw_text),
  5125. offset(_offset),
  5126. length(_length){
  5127. GGML_ASSERT( _offset >= 0 );
  5128. GGML_ASSERT( _length >= 1 );
  5129. GGML_ASSERT( offset + length <= raw_text.length() );
  5130. }
  5131. const FRAGMENT_BUFFER_VARIANT_TYPE type;
  5132. const llama_vocab::id token;
  5133. const std::string _dummy;
  5134. const std::string & raw_text;
  5135. const uint64_t offset;
  5136. const uint64_t length;
  5137. };
  5138. // #define PRETOKENIZERDEBUG
  5139. static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer)
  5140. {
  5141. // for each special token
  5142. for (const auto & st: vocab.special_tokens_cache) {
  5143. const auto & special_token = st.first;
  5144. const auto & special_id = st.second;
  5145. // for each text fragment
  5146. std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
  5147. while (it != buffer.end()) {
  5148. auto & fragment = (*it);
  5149. // if a fragment is text ( not yet processed )
  5150. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  5151. auto * raw_text = &(fragment.raw_text);
  5152. auto raw_text_base_offset = fragment.offset;
  5153. auto raw_text_base_length = fragment.length;
  5154. // loop over the text
  5155. while (true) {
5156. // find the first occurrence of a given special token in this fragment
5157. // passing the offset argument only limits the "search area", but match coordinates
  5158. // are still relative to the source full raw_text
  5159. auto match = raw_text->find(special_token, raw_text_base_offset);
5160. // no occurrences found, stop processing this fragment for a given special token
  5161. if (match == std::string::npos) break;
  5162. // check if match is within bounds of offset <-> length
  5163. if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
  5164. #ifdef PRETOKENIZERDEBUG
  5165. fprintf(stderr, "FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  5166. #endif
  5167. auto source = std::distance(buffer.begin(), it);
  5168. // if match is further than base offset
  5169. // then we have some text to the left of it
  5170. if (match > raw_text_base_offset) {
  5171. // left
  5172. const int64_t left_reminder_offset = raw_text_base_offset + 0;
  5173. const int64_t left_reminder_length = match - raw_text_base_offset;
  5174. buffer.emplace_after(it, (*raw_text), left_reminder_offset, left_reminder_length);
  5175. #ifdef PRETOKENIZERDEBUG
  5176. fprintf(stderr, "FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
  5177. #endif
  5178. it++;
  5179. }
  5180. // special token
  5181. buffer.emplace_after(it, special_id);
  5182. it++;
  5183. // right
  5184. if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
  5185. const int64_t right_reminder_offset = match + special_token.length();
  5186. const int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
  5187. buffer.emplace_after(it, (*raw_text), right_reminder_offset, right_reminder_length);
  5188. #ifdef PRETOKENIZERDEBUG
  5189. fprintf(stderr, "FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
  5190. #endif
  5191. it++;
  5192. if (source == 0) {
  5193. buffer.erase_after(buffer.before_begin());
  5194. } else {
  5195. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  5196. }
  5197. // repeat for the right side
  5198. raw_text_base_offset = right_reminder_offset;
  5199. raw_text_base_length = right_reminder_length;
  5200. #ifdef PRETOKENIZERDEBUG
  5201. fprintf(stderr, "RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  5202. #endif
  5203. } else {
  5204. if (source == 0) {
  5205. buffer.erase_after(buffer.before_begin());
  5206. } else {
  5207. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  5208. }
  5209. break;
  5210. }
  5211. }
  5212. }
  5213. it++;
  5214. }
  5215. }
  5216. }
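// illustrative partitioning (hypothetical special token "<s>" with id 1):
//   input fragment : RAW_TEXT "foo<s>bar"
//   after the pass : RAW_TEXT "foo", TOKEN 1, RAW_TEXT "bar"
// only the raw-text pieces are later run through the SPM/BPE tokenizers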
  5217. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special) {
  5218. std::vector<llama_vocab::id> output;
  5219. // OG tokenizer behavior:
  5220. //
  5221. // tokenizer.encode('', add_bos=True) returns [1]
  5222. // tokenizer.encode('', add_bos=False) returns []
  5223. if (bos && vocab.special_bos_id != -1) {
  5224. output.push_back(vocab.special_bos_id);
  5225. }
  5226. if (raw_text.empty()) {
  5227. return output;
  5228. }
  5229. std::forward_list<fragment_buffer_variant> fragment_buffer;
  5230. fragment_buffer.emplace_front( raw_text, 0, raw_text.length() );
  5231. if (special) tokenizer_st_partition( vocab, fragment_buffer );
  5232. switch (vocab.type) {
  5233. case LLAMA_VOCAB_TYPE_SPM:
  5234. {
  5235. for (const auto & fragment: fragment_buffer)
  5236. {
  5237. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT)
  5238. {
  5239. // without adding this leading whitespace, we do not get the same results as the original tokenizer
  5240. // TODO: It's likely possible to get rid of this string copy entirely
5241. // by modifying llm_tokenizer_x to operate with string offsets like the pre-tokenizer
5242. // and passing 'add space prefix' as a bool argument
  5243. //
  5244. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  5245. if (&fragment == &fragment_buffer.front()) {
  5246. raw_text = " " + raw_text; // prefix with space if the first token is not special
  5247. }
  5248. #ifdef PRETOKENIZERDEBUG
  5249. fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  5250. #endif
  5251. llm_tokenizer_spm tokenizer(vocab);
  5252. llama_escape_whitespace(raw_text);
  5253. tokenizer.tokenize(raw_text, output);
  5254. }
  5255. else // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  5256. {
  5257. output.push_back(fragment.token);
  5258. }
  5259. }
  5260. } break;
  5261. case LLAMA_VOCAB_TYPE_BPE:
  5262. {
  5263. for (const auto & fragment: fragment_buffer)
  5264. {
  5265. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT)
  5266. {
  5267. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  5268. #ifdef PRETOKENIZERDEBUG
  5269. fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  5270. #endif
  5271. llm_tokenizer_bpe tokenizer(vocab);
  5272. tokenizer.tokenize(raw_text, output);
  5273. }
  5274. else // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  5275. {
  5276. output.push_back(fragment.token);
  5277. }
  5278. }
  5279. } break;
  5280. }
  5281. return output;
  5282. }
  5283. //
  5284. // grammar - internal
  5285. //
  5286. struct llama_partial_utf8 {
  5287. uint32_t value; // bit value so far (unshifted)
  5288. int n_remain; // num bytes remaining; -1 indicates invalid sequence
  5289. };
  5290. struct llama_grammar {
  5291. const std::vector<std::vector<llama_grammar_element>> rules;
  5292. std::vector<std::vector<const llama_grammar_element *>> stacks;
  5293. // buffer for partially generated UTF-8 sequence from accepted tokens
  5294. llama_partial_utf8 partial_utf8;
  5295. };
  5296. struct llama_grammar_candidate {
  5297. size_t index;
  5298. const uint32_t * code_points;
  5299. llama_partial_utf8 partial_utf8;
  5300. };
5301. // Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 so the
5302. // result can be used as a pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
  5303. static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
  5304. const char * src,
  5305. llama_partial_utf8 partial_start) {
  5306. static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
  5307. const char * pos = src;
  5308. std::vector<uint32_t> code_points;
  5309. uint32_t value = partial_start.value;
  5310. int n_remain = partial_start.n_remain;
  5311. // continue previous decode, if applicable
  5312. while (*pos != 0 && n_remain > 0) {
  5313. uint8_t next_byte = static_cast<uint8_t>(*pos);
  5314. if ((next_byte >> 6) != 2) {
  5315. // invalid sequence, abort
  5316. code_points.push_back(0);
  5317. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
  5318. }
  5319. value = (value << 6) + (next_byte & 0x3F);
  5320. ++pos;
  5321. --n_remain;
  5322. }
  5323. if (partial_start.n_remain > 0 && n_remain == 0) {
  5324. code_points.push_back(value);
  5325. }
  5326. // decode any subsequent utf-8 sequences, which may end in an incomplete one
  5327. while (*pos != 0) {
  5328. uint8_t first_byte = static_cast<uint8_t>(*pos);
  5329. uint8_t highbits = first_byte >> 4;
  5330. n_remain = lookup[highbits] - 1;
  5331. if (n_remain < 0) {
  5332. // invalid sequence, abort
  5333. code_points.clear();
  5334. code_points.push_back(0);
  5335. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
  5336. }
  5337. uint8_t mask = (1 << (7 - n_remain)) - 1;
  5338. value = first_byte & mask;
  5339. ++pos;
  5340. while (*pos != 0 && n_remain > 0) {
  5341. value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
  5342. ++pos;
  5343. --n_remain;
  5344. }
  5345. if (n_remain == 0) {
  5346. code_points.push_back(value);
  5347. }
  5348. }
  5349. code_points.push_back(0);
  5350. return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
  5351. }
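// illustrative decode_utf8() results (values follow from the lookup table above):
//   src = "abc"  -> code_points {0x61, 0x62, 0x63, 0}, n_remain = 0
//   src = "\xC3" -> code_points {0}, value = 0x03, n_remain = 1
//                   (a later call with partial_start = {0x03, 1} and src = "\xA9" yields U+00E9)
//   src = "\x80" -> continuation byte in lead position, n_remain = -1 (invalid)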
  5352. // returns true iff pos points to the end of one of the definitions of a rule
  5353. static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
  5354. switch (pos->type) {
  5355. case LLAMA_GRETYPE_END: return true; // NOLINT
  5356. case LLAMA_GRETYPE_ALT: return true; // NOLINT
  5357. default: return false;
  5358. }
  5359. }
  5360. // returns true iff chr satisfies the char range at pos (regular or inverse range)
  5361. // asserts that pos is pointing to a char range element
  5362. static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
  5363. const llama_grammar_element * pos,
  5364. const uint32_t chr) {
  5365. bool found = false;
  5366. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  5367. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT
  5368. do {
  5369. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  5370. // inclusive range, e.g. [a-z]
  5371. found = found || (pos->value <= chr && chr <= pos[1].value);
  5372. pos += 2;
  5373. } else {
  5374. // exact char match, e.g. [a] or "a"
  5375. found = found || pos->value == chr;
  5376. pos += 1;
  5377. }
  5378. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  5379. return std::make_pair(found == is_positive_char, pos);
  5380. }
  5381. // returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
  5382. // range at pos (regular or inverse range)
  5383. // asserts that pos is pointing to a char range element
  5384. static bool llama_grammar_match_partial_char(
  5385. const llama_grammar_element * pos,
  5386. const llama_partial_utf8 partial_utf8) {
  5387. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  5388. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  5389. uint32_t partial_value = partial_utf8.value;
  5390. int n_remain = partial_utf8.n_remain;
  5391. // invalid sequence or 7-bit char split across 2 bytes (overlong)
  5392. if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
  5393. return false;
  5394. }
  5395. // range of possible code points this partial UTF-8 sequence could complete to
  5396. uint32_t low = partial_value << (n_remain * 6);
  5397. uint32_t high = low | ((1 << (n_remain * 6)) - 1);
  5398. if (low == 0) {
  5399. if (n_remain == 2) {
  5400. low = 1 << 11;
  5401. } else if (n_remain == 3) {
  5402. low = 1 << 16;
  5403. }
  5404. }
  5405. do {
  5406. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  5407. // inclusive range, e.g. [a-z]
  5408. if (pos->value <= high && low <= pos[1].value) {
  5409. return is_positive_char;
  5410. }
  5411. pos += 2;
  5412. } else {
  5413. // exact char match, e.g. [a] or "a"
  5414. if (low <= pos->value && pos->value <= high) {
  5415. return is_positive_char;
  5416. }
  5417. pos += 1;
  5418. }
  5419. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  5420. return !is_positive_char;
  5421. }
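// illustrative low/high computation above: for a partial 2-byte sequence with
// value = 0x03 and n_remain = 1, the completed code point must lie in
//   low  = 0x03 << 6      = 0xC0
//   high = low | (64 - 1) = 0xFF
// so the range [0xC0, 0xFF] is tested against the char-range elements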
  5422. // transforms a grammar pushdown stack into N possible stacks, all ending
  5423. // at a character range (terminal element)
  5424. static void llama_grammar_advance_stack(
  5425. const std::vector<std::vector<llama_grammar_element>> & rules,
  5426. const std::vector<const llama_grammar_element *> & stack,
  5427. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  5428. if (stack.empty()) {
  5429. new_stacks.emplace_back(stack);
  5430. return;
  5431. }
  5432. const llama_grammar_element * pos = stack.back();
  5433. switch (pos->type) {
  5434. case LLAMA_GRETYPE_RULE_REF: {
  5435. const size_t rule_id = static_cast<size_t>(pos->value);
  5436. const llama_grammar_element * subpos = rules[rule_id].data();
  5437. do {
  5438. // init new stack without the top (pos)
  5439. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  5440. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  5441. // if this rule ref is followed by another element, add that to stack
  5442. new_stack.push_back(pos + 1);
  5443. }
  5444. if (!llama_grammar_is_end_of_sequence(subpos)) {
  5445. // if alternate is nonempty, add to stack
  5446. new_stack.push_back(subpos);
  5447. }
  5448. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  5449. while (!llama_grammar_is_end_of_sequence(subpos)) {
  5450. // scan to end of alternate def
  5451. subpos++;
  5452. }
  5453. if (subpos->type == LLAMA_GRETYPE_ALT) {
  5454. // there's another alternate def of this rule to process
  5455. subpos++;
  5456. } else {
  5457. break;
  5458. }
  5459. } while (true);
  5460. break;
  5461. }
  5462. case LLAMA_GRETYPE_CHAR:
  5463. case LLAMA_GRETYPE_CHAR_NOT:
  5464. new_stacks.emplace_back(stack);
  5465. break;
  5466. default:
  5467. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  5468. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  5469. // those
  5470. GGML_ASSERT(false);
  5471. }
  5472. }
  5473. // takes a set of possible pushdown stacks on a grammar, which are required to
  5474. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  5475. // produces the N possible stacks if the given char is accepted at those
  5476. // positions
  5477. static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
  5478. const std::vector<std::vector<llama_grammar_element>> & rules,
  5479. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  5480. const uint32_t chr) {
  5481. std::vector<std::vector<const llama_grammar_element *>> new_stacks;
  5482. for (const auto & stack : stacks) {
  5483. if (stack.empty()) {
  5484. continue;
  5485. }
  5486. auto match = llama_grammar_match_char(stack.back(), chr);
  5487. if (match.first) {
  5488. const llama_grammar_element * pos = match.second;
  5489. // update top of stack to next element, if any
  5490. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  5491. if (!llama_grammar_is_end_of_sequence(pos)) {
  5492. new_stack.push_back(pos);
  5493. }
  5494. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  5495. }
  5496. }
  5497. return new_stacks;
  5498. }
  5499. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  5500. const std::vector<std::vector<llama_grammar_element>> & rules,
  5501. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  5502. const std::vector<llama_grammar_candidate> & candidates);
  5503. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  5504. const std::vector<std::vector<llama_grammar_element>> & rules,
  5505. const std::vector<const llama_grammar_element *> & stack,
  5506. const std::vector<llama_grammar_candidate> & candidates) {
  5507. std::vector<llama_grammar_candidate> rejects;
  5508. if (stack.empty()) {
  5509. for (const auto & tok : candidates) {
  5510. if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
  5511. rejects.push_back(tok);
  5512. }
  5513. }
  5514. return rejects;
  5515. }
  5516. const llama_grammar_element * stack_pos = stack.back();
  5517. std::vector<llama_grammar_candidate> next_candidates;
  5518. for (const auto & tok : candidates) {
  5519. if (*tok.code_points == 0) {
  5520. // reached end of full codepoints in token, reject iff it ended in a partial sequence
  5521. // that cannot satisfy this position in grammar
  5522. if (tok.partial_utf8.n_remain != 0 &&
  5523. !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
  5524. rejects.push_back(tok);
  5525. }
  5526. } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
  5527. next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
  5528. } else {
  5529. rejects.push_back(tok);
  5530. }
  5531. }
  5532. const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  5533. // update top of stack to next element, if any
  5534. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  5535. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  5536. stack_after.push_back(stack_pos_after);
  5537. }
  5538. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  5539. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  5540. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  5541. for (const auto & tok : next_rejects) {
  5542. rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
  5543. }
  5544. return rejects;
  5545. }
  5546. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  5547. const std::vector<std::vector<llama_grammar_element>> & rules,
  5548. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  5549. const std::vector<llama_grammar_candidate> & candidates) {
  5550. GGML_ASSERT(!stacks.empty()); // REVIEW
  5551. if (candidates.empty()) {
  5552. return std::vector<llama_grammar_candidate>();
  5553. }
  5554. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  5555. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  5556. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  5557. }
  5558. return rejects;
  5559. }
  5560. //
  5561. // grammar - external
  5562. //
  5563. struct llama_grammar * llama_grammar_init(
  5564. const llama_grammar_element ** rules,
  5565. size_t n_rules,
  5566. size_t start_rule_index) {
  5567. const llama_grammar_element * pos;
  5568. // copy rule definitions into vectors
  5569. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  5570. for (size_t i = 0; i < n_rules; i++) {
  5571. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  5572. vec_rules[i].push_back(*pos);
  5573. }
  5574. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  5575. }
  5576. // loop over alternates of start rule to build initial stacks
  5577. std::vector<std::vector<const llama_grammar_element *>> stacks;
  5578. pos = rules[start_rule_index];
  5579. do {
  5580. std::vector<const llama_grammar_element *> stack;
  5581. if (!llama_grammar_is_end_of_sequence(pos)) {
  5582. // if alternate is nonempty, add to stack
  5583. stack.push_back(pos);
  5584. }
  5585. llama_grammar_advance_stack(vec_rules, stack, stacks);
  5586. while (!llama_grammar_is_end_of_sequence(pos)) {
  5587. // scan to end of alternate def
  5588. pos++;
  5589. }
  5590. if (pos->type == LLAMA_GRETYPE_ALT) {
  5591. // there's another alternate def of this rule to process
  5592. pos++;
  5593. } else {
  5594. break;
  5595. }
  5596. } while (true);
  5597. return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
  5598. }
  5599. void llama_grammar_free(struct llama_grammar * grammar) {
  5600. delete grammar;
  5601. }
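// illustrative sketch of the expected rules layout (hypothetical grammar `root ::= "a"`):
//
//     const llama_grammar_element rule0[] = {
//         { LLAMA_GRETYPE_CHAR, 'a' },
//         { LLAMA_GRETYPE_END,  0   },
//     };
//     const llama_grammar_element * rules[] = { rule0 };
//     struct llama_grammar * grammar = llama_grammar_init(rules, 1, 0);
//     // ... sample with the grammar ...
//     llama_grammar_free(grammar);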
  5602. struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
  5603. llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };
  5604. // redirect elements in stacks to point to new rules
  5605. for (size_t is = 0; is < result->stacks.size(); is++) {
  5606. for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
  5607. for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
  5608. for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
  5609. if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
  5610. result->stacks[is][ie] = &result->rules[ir0][ir1];
  5611. }
  5612. }
  5613. }
  5614. }
  5615. }
  5616. return result;
  5617. }
  5618. //
  5619. // sampling
  5620. //
  5621. void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
  5622. if (seed == LLAMA_DEFAULT_SEED) {
  5623. seed = time(NULL);
  5624. }
  5625. ctx->rng.seed(seed);
  5626. }
  5627. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  5628. GGML_ASSERT(candidates->size > 0);
  5629. const int64_t t_start_sample_us = ggml_time_us();
  5630. // Sort the logits in descending order
  5631. if (!candidates->sorted) {
  5632. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  5633. return a.logit > b.logit;
  5634. });
  5635. candidates->sorted = true;
  5636. }
  5637. float max_l = candidates->data[0].logit;
  5638. float cum_sum = 0.0f;
  5639. for (size_t i = 0; i < candidates->size; ++i) {
  5640. float p = expf(candidates->data[i].logit - max_l);
  5641. candidates->data[i].p = p;
  5642. cum_sum += p;
  5643. }
  5644. for (size_t i = 0; i < candidates->size; ++i) {
  5645. candidates->data[i].p /= cum_sum;
  5646. }
  5647. if (ctx) {
  5648. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5649. }
  5650. }
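// illustrative softmax result for logits {2, 1, 0} (max_l = 2 is subtracted first for
// numerical stability): exp{0, -1, -2} = {1.000, 0.368, 0.135}, sum = 1.503,
// so p = {0.665, 0.245, 0.090}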
  5651. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep) {
  5652. const int64_t t_start_sample_us = ggml_time_us();
  5653. k = std::max(k, (int) min_keep);
  5654. k = std::min(k, (int) candidates->size);
  5655. // Sort scores in descending order
  5656. if (!candidates->sorted) {
  5657. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  5658. return a.logit > b.logit;
  5659. };
  5660. if (k == (int) candidates->size) {
  5661. std::sort(candidates->data, candidates->data + candidates->size, comp);
  5662. } else {
  5663. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  5664. }
  5665. candidates->sorted = true;
  5666. }
  5667. candidates->size = k;
  5668. if (ctx) {
  5669. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5670. }
  5671. }
  5672. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  5673. if (p >= 1.0f) {
  5674. return;
  5675. }
  5676. llama_sample_softmax(ctx, candidates);
  5677. const int64_t t_start_sample_us = ggml_time_us();
  5678. // Compute the cumulative probabilities
  5679. float cum_sum = 0.0f;
  5680. size_t last_idx = candidates->size;
  5681. for (size_t i = 0; i < candidates->size; ++i) {
  5682. cum_sum += candidates->data[i].p;
5683. // Check if the running sum is at least p and we have kept at least min_keep tokens
5684. // we set the last index to i+1 to indicate that the current token should be included in the set
  5685. if (cum_sum >= p && i + 1 >= min_keep) {
  5686. last_idx = i + 1;
  5687. break;
  5688. }
  5689. }
  5690. // Resize the output vector to keep only the top-p tokens
  5691. candidates->size = last_idx;
  5692. if (ctx) {
  5693. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5694. }
  5695. }
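// illustrative top-p cutoff with p = 0.90 and sorted probabilities {0.50, 0.30, 0.15, 0.05}:
// the running sum reaches 0.95 >= 0.90 at the third token, so candidates->size becomes 3
// (subject to min_keep)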
  5696. void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  5697. if (p <= 0.0f || !candidates->size) {
  5698. return;
  5699. }
  5700. llama_sample_softmax(ctx, candidates);
  5701. const int64_t t_start_sample_us = ggml_time_us();
  5702. float scale = candidates->data[0].p; // scale by max prob
  5703. size_t i = 1; // first token always matches
  5704. for (; i < candidates->size; ++i) {
  5705. if (candidates->data[i].p < p * scale && i >= min_keep) {
  5706. break; // prob too small
  5707. }
  5708. }
  5709. // Resize the output vector to keep only the matching tokens
  5710. candidates->size = i;
  5711. if (ctx) {
  5712. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5713. }
  5714. }
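// illustrative min-p cutoff with p = 0.05: if the top probability is 0.40, the threshold is
// 0.40 * 0.05 = 0.02, so the sorted prefix of tokens with probability >= 0.02 is kept
// (at least min_keep tokens)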
  5715. void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
  5716. if (z >= 1.0f || candidates->size <= 2) {
  5717. return;
  5718. }
  5719. llama_sample_softmax(nullptr, candidates);
  5720. const int64_t t_start_sample_us = ggml_time_us();
  5721. // Compute the first and second derivatives
  5722. std::vector<float> first_derivatives(candidates->size - 1);
  5723. std::vector<float> second_derivatives(candidates->size - 2);
  5724. for (size_t i = 0; i < first_derivatives.size(); ++i) {
  5725. first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
  5726. }
  5727. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  5728. second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
  5729. }
  5730. // Calculate absolute value of second derivatives
  5731. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  5732. second_derivatives[i] = std::abs(second_derivatives[i]);
  5733. }
  5734. // Normalize the second derivatives
  5735. {
  5736. const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
  5737. if (second_derivatives_sum > 1e-6f) {
  5738. for (float & value : second_derivatives) {
  5739. value /= second_derivatives_sum;
  5740. }
  5741. } else {
  5742. for (float & value : second_derivatives) {
  5743. value = 1.0f / second_derivatives.size();
  5744. }
  5745. }
  5746. }
  5747. float cum_sum = 0.0f;
  5748. size_t last_idx = candidates->size;
  5749. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  5750. cum_sum += second_derivatives[i];
5751. // Check if the running sum is greater than z and we have kept at least min_keep tokens
  5752. if (cum_sum > z && i >= min_keep) {
  5753. last_idx = i;
  5754. break;
  5755. }
  5756. }
  5757. // Resize the output vector to keep only the tokens above the tail location
  5758. candidates->size = last_idx;
  5759. if (ctx) {
  5760. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5761. }
  5762. }
  5763. void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  5764. // Reference implementation:
  5765. // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
  5766. if (p >= 1.0f) {
  5767. return;
  5768. }
  5769. // Compute the softmax of logits and calculate entropy
  5770. llama_sample_softmax(nullptr, candidates);
  5771. const int64_t t_start_sample_us = ggml_time_us();
  5772. float entropy = 0.0f;
  5773. for (size_t i = 0; i < candidates->size; ++i) {
  5774. entropy += -candidates->data[i].p * logf(candidates->data[i].p);
  5775. }
  5776. // Compute the absolute difference between negative log probability and entropy for each candidate
  5777. std::vector<float> shifted_scores;
  5778. for (size_t i = 0; i < candidates->size; ++i) {
  5779. float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
  5780. shifted_scores.push_back(shifted_score);
  5781. }
  5782. // Sort tokens based on the shifted_scores and their corresponding indices
  5783. std::vector<size_t> indices(candidates->size);
  5784. std::iota(indices.begin(), indices.end(), 0);
  5785. std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
  5786. return shifted_scores[a] < shifted_scores[b];
  5787. });
  5788. // Compute the cumulative probabilities
  5789. float cum_sum = 0.0f;
  5790. size_t last_idx = indices.size();
  5791. for (size_t i = 0; i < indices.size(); ++i) {
  5792. size_t idx = indices[i];
  5793. cum_sum += candidates->data[idx].p;
5794. // Check if the running sum is greater than p and we have kept at least min_keep tokens
  5795. if (cum_sum > p && i >= min_keep - 1) {
  5796. last_idx = i + 1;
  5797. break;
  5798. }
  5799. }
  5800. // Resize the output vector to keep only the locally typical tokens
  5801. std::vector<llama_token_data> new_candidates;
  5802. for (size_t i = 0; i < last_idx; ++i) {
  5803. size_t idx = indices[i];
  5804. new_candidates.push_back(candidates->data[idx]);
  5805. }
  5806. // Replace the data in candidates with the new_candidates data
  5807. std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
  5808. candidates->size = new_candidates.size();
  5809. if (ctx) {
  5810. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5811. }
  5812. }
  5813. void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  5814. const int64_t t_start_sample_us = ggml_time_us();
  5815. for (size_t i = 0; i < candidates_p->size; ++i) {
  5816. candidates_p->data[i].logit /= temp;
  5817. }
  5818. if (ctx) {
  5819. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5820. }
  5821. }
  5822. void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  5823. llama_sample_temp(ctx, candidates_p, temp);
  5824. }
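// Combined repetition/frequency/presence penalties over the last `penalty_last_n`
// tokens. For a candidate that occurred `count` times in that window:
//   logit' = (logit <= 0 ? logit * penalty_repeat : logit / penalty_repeat)
//            - count * penalty_freq - (count > 0 ? penalty_present : 0)
// Illustrative usage (sketch only, assuming `last_tokens` holds the recent context):
//   llama_sample_repetition_penalties(ctx, &cands, last_tokens.data(), last_tokens.size(), 1.1f, 0.0f, 0.0f);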
  5825. void llama_sample_repetition_penalties(
  5826. struct llama_context * ctx,
  5827. llama_token_data_array * candidates,
  5828. const llama_token * last_tokens,
  5829. size_t penalty_last_n,
  5830. float penalty_repeat,
  5831. float penalty_freq,
  5832. float penalty_present) {
  5833. if (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)) {
  5834. return;
  5835. }
  5836. const int64_t t_start_sample_us = ggml_time_us();
  5837. // Create a frequency map to count occurrences of each token in last_tokens
  5838. std::unordered_map<llama_token, int> token_count;
  5839. for (size_t i = 0; i < penalty_last_n; ++i) {
  5840. token_count[last_tokens[i]]++;
  5841. }
  5842. // Apply frequency and presence penalties to the candidates
  5843. for (size_t i = 0; i < candidates->size; ++i) {
  5844. const auto token_iter = token_count.find(candidates->data[i].id);
  5845. if (token_iter == token_count.end()) {
  5846. continue;
  5847. }
  5848. const int count = token_iter->second;
5849. // The academic publication that described this technique only divided the logit by the penalty, but that would cause tokens with negative logits to become more likely, which is obviously wrong.
5850. // The common fix is to multiply tokens with negative logits by the penalty instead of dividing, so the penalty always reduces their probability.
  5851. if (candidates->data[i].logit <= 0) {
  5852. candidates->data[i].logit *= penalty_repeat;
  5853. } else {
  5854. candidates->data[i].logit /= penalty_repeat;
  5855. }
  5856. candidates->data[i].logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;
  5857. }
  5858. candidates->sorted = false;
  5859. if (ctx) {
  5860. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5861. }
  5862. }
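// Grammar-constrained sampling: candidates whose text the grammar cannot accept
// from its current parse stacks get their logit set to -INFINITY, so they can
// never be sampled. EOS is only allowed once at least one parse stack is empty,
// i.e. the grammar is able to terminate.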
  5863. void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
  5864. GGML_ASSERT(ctx);
  5865. const int64_t t_start_sample_us = ggml_time_us();
  5866. bool allow_eos = false;
  5867. for (const auto & stack : grammar->stacks) {
  5868. if (stack.empty()) {
  5869. allow_eos = true;
  5870. break;
  5871. }
  5872. }
  5873. const llama_token eos = llama_token_eos(&ctx->model);
  5874. std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
  5875. std::vector<llama_grammar_candidate> candidates_grammar;
  5876. for (size_t i = 0; i < candidates->size; ++i) {
  5877. const llama_token id = candidates->data[i].id;
  5878. const std::string piece = llama_token_to_piece(ctx, id);
  5879. if (id == eos) {
  5880. if (!allow_eos) {
  5881. candidates->data[i].logit = -INFINITY;
  5882. }
  5883. } else if (piece.empty() || piece[0] == 0) {
  5884. candidates->data[i].logit = -INFINITY;
  5885. } else {
  5886. candidates_decoded.push_back(decode_utf8(piece.c_str(), grammar->partial_utf8));
  5887. candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
  5888. }
  5889. }
  5890. const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
  5891. for (const auto & reject : rejects) {
  5892. candidates->data[reject.index].logit = -INFINITY;
  5893. }
  5894. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5895. }
  5896. static void llama_log_softmax(float * array, size_t size) {
  5897. float max_l = *std::max_element(array, array + size);
  5898. float sum = 0.f;
  5899. for (size_t i = 0; i < size; ++i) {
  5900. float p = expf(array[i] - max_l);
  5901. sum += p;
  5902. array[i] = p;
  5903. }
  5904. for (size_t i = 0; i < size; ++i) {
  5905. array[i] = logf(array[i] / sum);
  5906. }
  5907. }
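// Classifier-free guidance: combines the logits of the main (prompt-conditioned)
// context with those of a guidance (e.g. negative-prompt) context. In log space:
//   logit' = scale * (logit_base - logit_guidance) + logit_guidance
// scale == 1.0 reproduces the base logits; larger values push the distribution
// further away from the guidance context.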
  5908. void llama_sample_classifier_free_guidance(
  5909. struct llama_context * ctx,
  5910. llama_token_data_array * candidates,
  5911. struct llama_context * guidance_ctx,
  5912. float scale) {
  5913. int64_t t_start_sample_us = ggml_time_us();
  5914. GGML_ASSERT(ctx);
  5915. auto n_vocab = llama_n_vocab(llama_get_model(ctx));
  5916. GGML_ASSERT(n_vocab == (int)candidates->size);
  5917. GGML_ASSERT(!candidates->sorted);
  5918. std::vector<float> logits_base;
  5919. logits_base.reserve(candidates->size);
  5920. for (size_t i = 0; i < candidates->size; ++i) {
  5921. logits_base.push_back(candidates->data[i].logit);
  5922. }
  5923. llama_log_softmax(logits_base.data(), candidates->size);
  5924. float* logits_guidance = llama_get_logits(guidance_ctx);
  5925. llama_log_softmax(logits_guidance, n_vocab);
  5926. for (int i = 0; i < n_vocab; ++i) {
  5927. float logit_guidance = logits_guidance[i];
  5928. float logit_base = logits_base[i];
  5929. candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance;
  5930. }
  5931. if (ctx) {
  5932. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5933. }
  5934. }
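// Mirostat (v1): targets a constant surprise (cross-entropy) `tau`. The Zipf
// exponent s_hat is estimated from the top `m` tokens, a top-k cutoff is derived
// from s_hat and the running estimate `mu`, and after sampling `mu` is updated
// with the error between the observed surprise and the target:
//   mu <- mu - eta * (-log2(p(X)) - tau)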
  5935. llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
  5936. GGML_ASSERT(ctx);
  5937. auto N = float(llama_n_vocab(llama_get_model(ctx)));
  5938. int64_t t_start_sample_us;
  5939. t_start_sample_us = ggml_time_us();
  5940. llama_sample_softmax(nullptr, candidates);
  5941. // Estimate s_hat using the most probable m tokens
  5942. float s_hat = 0.0;
  5943. float sum_ti_bi = 0.0;
  5944. float sum_ti_sq = 0.0;
  5945. for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
  5946. float t_i = logf(float(i + 2) / float(i + 1));
  5947. float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
  5948. sum_ti_bi += t_i * b_i;
  5949. sum_ti_sq += t_i * t_i;
  5950. }
  5951. s_hat = sum_ti_bi / sum_ti_sq;
  5952. // Compute k from the estimated s_hat and target surprise value
  5953. float epsilon_hat = s_hat - 1;
  5954. float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
  5955. // Sample the next word X using top-k sampling
  5956. llama_sample_top_k(nullptr, candidates, int(k), 1);
  5957. if (ctx) {
  5958. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5959. }
  5960. llama_token X = llama_sample_token(ctx, candidates);
  5961. t_start_sample_us = ggml_time_us();
  5962. // Compute error as the difference between observed surprise and target surprise value
  5963. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  5964. return candidate.id == X;
  5965. }));
  5966. float observed_surprise = -log2f(candidates->data[X_idx].p);
  5967. float e = observed_surprise - tau;
  5968. // Update mu using the learning rate and error
  5969. *mu = *mu - eta * e;
  5970. if (ctx) {
  5971. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5972. }
  5973. return X;
  5974. }
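// Mirostat v2: simpler variant that truncates all tokens whose surprise
// -log2(p) exceeds the current `mu`, renormalizes, samples, and then applies
// the same mu update as v1.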
  5975. llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
  5976. int64_t t_start_sample_us;
  5977. t_start_sample_us = ggml_time_us();
  5978. llama_sample_softmax(ctx, candidates);
  5979. // Truncate the words with surprise values greater than mu
  5980. candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  5981. return -log2f(candidate.p) > *mu;
  5982. }));
  5983. if (candidates->size == 0) {
  5984. candidates->size = 1;
  5985. }
  5986. if (ctx) {
  5987. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  5988. }
  5989. // Normalize the probabilities of the remaining words
  5990. llama_sample_softmax(ctx, candidates);
  5991. // Sample the next word X from the remaining words
  5992. llama_token X = llama_sample_token(ctx, candidates);
  5993. t_start_sample_us = ggml_time_us();
  5994. // Compute error as the difference between observed surprise and target surprise value
  5995. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  5996. return candidate.id == X;
  5997. }));
  5998. float observed_surprise = -log2f(candidates->data[X_idx].p);
  5999. float e = observed_surprise - tau;
  6000. // Update mu using the learning rate and error
  6001. *mu = *mu - eta * e;
  6002. if (ctx) {
  6003. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6004. }
  6005. return X;
  6006. }
  6007. llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
  6008. const int64_t t_start_sample_us = ggml_time_us();
  6009. // Find max element
  6010. auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  6011. return a.logit < b.logit;
  6012. });
  6013. llama_token result = max_iter->id;
  6014. if (ctx) {
  6015. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6016. ctx->n_sample++;
  6017. }
  6018. return result;
  6019. }
  6020. llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
  6021. GGML_ASSERT(ctx);
  6022. const int64_t t_start_sample_us = ggml_time_us();
  6023. llama_sample_softmax(nullptr, candidates);
  6024. std::vector<float> probs;
  6025. probs.reserve(candidates->size);
  6026. for (size_t i = 0; i < candidates->size; ++i) {
  6027. probs.push_back(candidates->data[i].p);
  6028. }
  6029. std::discrete_distribution<> dist(probs.begin(), probs.end());
  6030. auto & rng = ctx->rng;
  6031. int idx = dist(rng);
  6032. llama_token result = candidates->data[idx].id;
  6033. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6034. ctx->n_sample++;
  6035. return result;
  6036. }
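// Illustrative sampling pipeline (sketch only, not part of the build). Assumes
// the logits of the last evaluated token are available and `last_tokens` holds
// the recent context; the parameter values are examples:
//
//   float * logits  = llama_get_logits(ctx);
//   int     n_vocab = llama_n_vocab(llama_get_model(ctx));
//   std::vector<llama_token_data> cands_vec;
//   cands_vec.reserve(n_vocab);
//   for (llama_token id = 0; id < n_vocab; ++id) {
//       cands_vec.push_back({ id, logits[id], 0.0f });
//   }
//   llama_token_data_array cands = { cands_vec.data(), cands_vec.size(), false };
//   llama_sample_repetition_penalties(ctx, &cands, last_tokens.data(), last_tokens.size(), 1.1f, 0.0f, 0.0f);
//   llama_sample_top_k (ctx, &cands, 40,    1);
//   llama_sample_top_p (ctx, &cands, 0.95f, 1);
//   llama_sample_temp  (ctx, &cands, 0.8f);
//   llama_token next = llama_sample_token(ctx, &cands);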
  6037. void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
  6038. const int64_t t_start_sample_us = ggml_time_us();
  6039. if (token == llama_token_eos(&ctx->model)) {
  6040. for (const auto & stack : grammar->stacks) {
  6041. if (stack.empty()) {
  6042. return;
  6043. }
  6044. }
  6045. GGML_ASSERT(false);
  6046. }
  6047. const std::string piece = llama_token_to_piece(ctx, token);
  6048. // Note terminating 0 in decoded string
  6049. const auto decoded = decode_utf8(piece.c_str(), grammar->partial_utf8);
  6050. const auto & code_points = decoded.first;
  6051. for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
  6052. grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
  6053. }
  6054. grammar->partial_utf8 = decoded.second;
  6055. GGML_ASSERT(!grammar->stacks.empty());
  6056. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6057. }
  6058. //
  6059. // Beam search
  6060. //
  6061. struct llama_beam {
  6062. std::vector<llama_token> tokens;
  6063. float p; // Cumulative beam probability (renormalized relative to all beams)
  6064. bool eob; // Initialize end-of-beam to false. Callback sets this to true.
  6065. // Sort beams by probability. In case of ties, prefer beams at eob.
  6066. bool operator<(const llama_beam & rhs) const {
  6067. return std::make_pair(p, eob) < std::make_pair(rhs.p, rhs.eob);
  6068. }
  6069. // Shift off first n tokens and discard them.
  6070. void shift_tokens(const size_t n) {
  6071. if (n) {
  6072. std::copy(tokens.begin() + n, tokens.end(), tokens.begin());
  6073. tokens.resize(tokens.size() - n);
  6074. }
  6075. }
  6076. llama_beam_view view() const { return {tokens.data(), tokens.size(), p, eob}; }
  6077. };
  6078. // A struct for calculating logit-related info.
  6079. struct llama_logit_info {
  6080. const float * const logits;
  6081. const int n_vocab;
  6082. const float max_l;
  6083. const float normalizer;
  6084. struct sum_exp {
  6085. float max_l;
  6086. float operator()(float sum, float l) const { return sum + std::exp(l - max_l); }
  6087. };
  6088. llama_logit_info(llama_context * ctx)
  6089. : logits(llama_get_logits(ctx))
  6090. , n_vocab(llama_n_vocab(llama_get_model(ctx)))
  6091. , max_l(*std::max_element(logits, logits + n_vocab))
  6092. , normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
  6093. { }
  6094. llama_token_data get_token_data(const llama_token token_id) const {
  6095. constexpr auto p = std::numeric_limits<float>::quiet_NaN(); // never used
  6096. return {token_id, logits[token_id], p};
  6097. }
  6098. // Return top k token_data by logit.
  6099. std::vector<llama_token_data> top_k(size_t k) {
  6100. std::vector<llama_token_data> min_heap; // min-heap by logit
  6101. const llama_token k_min = std::min(static_cast<llama_token>(k), n_vocab);
  6102. min_heap.reserve(k_min);
  6103. for (llama_token token_id = 0 ; token_id < k_min ; ++token_id) {
  6104. min_heap.push_back(get_token_data(token_id));
  6105. }
  6106. auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; };
  6107. std::make_heap(min_heap.begin(), min_heap.end(), comp);
  6108. for (llama_token token_id = k_min ; token_id < n_vocab ; ++token_id) {
  6109. if (min_heap.front().logit < logits[token_id]) {
  6110. std::pop_heap(min_heap.begin(), min_heap.end(), comp);
  6111. min_heap.back().id = token_id;
  6112. min_heap.back().logit = logits[token_id];
  6113. std::push_heap(min_heap.begin(), min_heap.end(), comp);
  6114. }
  6115. }
  6116. return min_heap;
  6117. }
  6118. float probability_from_logit(float logit) const {
  6119. return normalizer * std::exp(logit - max_l);
  6120. }
  6121. };
  6122. struct llama_beam_search_data {
  6123. llama_context * ctx;
  6124. size_t n_beams;
  6125. int n_past;
  6126. int n_predict;
  6127. std::vector<llama_beam> beams;
  6128. std::vector<llama_beam> next_beams;
  6129. // Re-calculated on each loop iteration
  6130. size_t common_prefix_length;
  6131. // Used to communicate to/from callback on beams state.
  6132. std::vector<llama_beam_view> beam_views;
  6133. llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict)
  6134. : ctx(ctx)
  6135. , n_beams(n_beams)
  6136. , n_past(n_past)
  6137. , n_predict(n_predict)
  6138. , beam_views(n_beams) {
  6139. beams.reserve(n_beams);
  6140. next_beams.reserve(n_beams);
  6141. }
  6142. // Collapse beams to a single beam given by index.
  6143. void collapse_beams(const size_t beam_idx) {
  6144. if (0u < beam_idx) {
  6145. std::swap(beams[0], beams[beam_idx]);
  6146. }
  6147. beams.resize(1);
  6148. }
  6149. // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
6150. // The repetitive patterns below reflect the two stages of the heaps:
  6151. // * Gather elements until the vector is full, then call std::make_heap() on it.
  6152. // * If the heap is full and a new element is found that should be included, pop the
  6153. // least element to the back(), replace it with the new, then push it into the heap.
  6154. void fill_next_beams_by_top_probabilities(llama_beam & beam) {
  6155. // Min-heaps use a greater-than comparator.
  6156. const auto comp = [](const llama_beam & a, const llama_beam & b) { return a.p > b.p; };
  6157. if (beam.eob) {
  6158. // beam is at end-of-sentence, so just copy it to next_beams if its probability is high enough.
  6159. if (next_beams.size() < n_beams) {
  6160. next_beams.push_back(std::move(beam));
  6161. if (next_beams.size() == n_beams) {
  6162. std::make_heap(next_beams.begin(), next_beams.end(), comp);
  6163. }
  6164. } else if (next_beams.front().p < beam.p) {
  6165. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  6166. next_beams.back() = std::move(beam);
  6167. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  6168. }
  6169. } else {
  6170. // beam is not at end-of-sentence, so branch with next top_k tokens.
  6171. if (!beam.tokens.empty()) {
  6172. llama_decode(ctx, llama_batch_get_one(beam.tokens.data(), beam.tokens.size(), n_past, 0));
  6173. }
  6174. llama_logit_info logit_info(ctx);
  6175. std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);
  6176. size_t i=0;
  6177. if (next_beams.size() < n_beams) {
  6178. for (; next_beams.size() < n_beams ; ++i) {
  6179. llama_beam next_beam = beam;
  6180. next_beam.tokens.push_back(next_tokens[i].id);
  6181. next_beam.p *= logit_info.probability_from_logit(next_tokens[i].logit);
  6182. next_beams.push_back(std::move(next_beam));
  6183. }
  6184. std::make_heap(next_beams.begin(), next_beams.end(), comp);
  6185. } else {
  6186. for (; next_beams.front().p == 0.0f ; ++i) {
  6187. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  6188. next_beams.back() = beam;
  6189. next_beams.back().tokens.push_back(next_tokens[i].id);
  6190. next_beams.back().p *= logit_info.probability_from_logit(next_tokens[i].logit);
  6191. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  6192. }
  6193. }
  6194. for (; i < n_beams ; ++i) {
  6195. const float next_p = beam.p * logit_info.probability_from_logit(next_tokens[i].logit);
  6196. if (next_beams.front().p < next_p) {
  6197. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  6198. next_beams.back() = beam;
  6199. next_beams.back().tokens.push_back(next_tokens[i].id);
  6200. next_beams.back().p = next_p;
  6201. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  6202. }
  6203. }
  6204. }
  6205. }
  6206. // Find common_prefix_length based on beams.
  6207. // Requires beams is not empty.
  6208. size_t find_common_prefix_length() {
  6209. size_t common_prefix_length = beams[0].tokens.size();
  6210. for (size_t i = 1 ; i < beams.size() ; ++i) {
  6211. common_prefix_length = std::min(common_prefix_length, beams[i].tokens.size());
  6212. for (size_t j = 0 ; j < common_prefix_length ; ++j) {
  6213. if (beams[0].tokens[j] != beams[i].tokens[j]) {
  6214. common_prefix_length = j;
  6215. break;
  6216. }
  6217. }
  6218. }
  6219. return common_prefix_length;
  6220. }
  6221. // Construct beams_state to send back to caller via the callback function.
  6222. // Side effect: set common_prefix_length = find_common_prefix_length();
  6223. llama_beams_state get_beams_state(const bool last_call) {
  6224. for (size_t i = 0 ; i < beams.size() ; ++i) {
  6225. beam_views[i] = beams[i].view();
  6226. }
  6227. common_prefix_length = find_common_prefix_length();
  6228. return {beam_views.data(), beams.size(), common_prefix_length, last_call};
  6229. }
  6230. // Loop:
  6231. // * while i < n_predict, AND
  6232. // * any of the beams have not yet reached end-of-beam (eob), AND
  6233. // * the highest probability beam(s) (plural in case of ties) are not at end-of-sentence
  6234. // (since all other beam probabilities can only decrease)
  6235. void loop(const llama_beam_search_callback_fn_t callback, void * const callback_data) {
  6236. beams.push_back({{}, 1.0f, false}); // Start with one empty beam w/ probability = 1.0 and !eob.
  6237. const auto not_eob = [](const llama_beam & beam) { return !beam.eob; };
  6238. for (int i = 0 ; i < n_predict && std::any_of(beams.begin(),beams.end(),not_eob) &&
  6239. !beams[top_beam_index()].eob ; ++i) {
  6240. callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
  6241. update_beams_from_beam_views(); // Update values (p,eob) that callback may have changed.
  6242. if (common_prefix_length) {
  6243. llama_decode(ctx, llama_batch_get_one(beams[0].tokens.data(), common_prefix_length, n_past, 0));
  6244. n_past += common_prefix_length;
  6245. }
  6246. // Zero-out next_beam probabilities to place them last in following min-heap.
  6247. std::for_each(next_beams.begin(), next_beams.end(), [](llama_beam & beam) { beam.p = 0.0f; });
  6248. for (llama_beam & beam : beams) {
  6249. beam.shift_tokens(common_prefix_length);
  6250. fill_next_beams_by_top_probabilities(beam);
  6251. }
  6252. // next_beams become the beams of next/final iteration. Swap them to re-use memory.
  6253. beams.swap(next_beams);
  6254. renormalize_beam_probabilities(beams);
  6255. }
  6256. collapse_beams(top_beam_index());
  6257. callback(callback_data, get_beams_state(true));
  6258. }
  6259. // As beams grow, the cumulative probabilities decrease.
  6260. // Renormalize them to avoid floating point underflow.
  6261. static void renormalize_beam_probabilities(std::vector<llama_beam> & beams) {
  6262. const auto sum_p = [](float sum, llama_beam & beam) { return sum + beam.p; };
  6263. const float inv_sum = 1.0f / std::accumulate(beams.begin(), beams.end(), 0.0f, sum_p);
  6264. std::for_each(beams.begin(), beams.end(), [=](llama_beam & beam) { beam.p *= inv_sum; });
  6265. }
  6266. // Assumes beams is non-empty. Uses llama_beam::operator<() for ordering.
  6267. size_t top_beam_index() {
  6268. return std::max_element(beams.begin(), beams.end()) - beams.begin();
  6269. }
  6270. // Copy (p,eob) for each beam which may have been changed by the callback.
  6271. void update_beams_from_beam_views() {
  6272. for (size_t i = 0 ; i < beams.size() ; ++i) {
  6273. beams[i].p = beam_views[i].p;
  6274. beams[i].eob = beam_views[i].eob;
  6275. }
  6276. }
  6277. };
  6278. void llama_beam_search(llama_context * ctx,
  6279. llama_beam_search_callback_fn_t callback, void * callback_data,
  6280. size_t n_beams, int n_past, int n_predict) {
  6281. assert(ctx);
  6282. const int64_t t_start_sample_us = ggml_time_us();
  6283. llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict);
  6284. beam_search_data.loop(callback, callback_data);
  6285. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6286. ctx->n_sample++;
  6287. }
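// Illustrative beam-search usage (sketch only, not part of the build). The
// callback receives a llama_beams_state (beam_views / n_beams /
// common_prefix_length / last_call, as constructed in get_beams_state() above)
// and may set eob on beams it considers finished:
//
//   static void beam_cb(void * user_data, llama_beams_state state) {
//       auto & out = *static_cast<std::vector<llama_token> *>(user_data);
//       // Tokens shared by all beams are final and can be collected/streamed.
//       for (size_t j = 0; j < state.common_prefix_length; ++j) {
//           out.push_back(state.beam_views[0].tokens[j]);
//       }
//   }
//   std::vector<llama_token> output;
//   llama_beam_search(ctx, beam_cb, &output, /*n_beams*/ 4, n_past, /*n_predict*/ 64);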
  6288. //
  6289. // quantization
  6290. //
  6291. template <typename T>
  6292. struct no_init {
  6293. T value;
  6294. no_init() { /* do nothing */ }
  6295. };
  6296. struct quantize_state_internal {
  6297. const llama_model & model;
  6298. const llama_model_quantize_params * params;
  6299. int n_attention_wv = 0;
  6300. int n_feed_forward_w2 = 0;
  6301. int i_attention_wv = 0;
  6302. int i_feed_forward_w2 = 0;
  6303. int n_k_quantized = 0;
  6304. int n_fallback = 0;
  6305. quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
  6306. : model(model)
  6307. , params(params)
  6308. {}
  6309. };
  6310. static void llama_convert_tensor_internal(
  6311. struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
  6312. const size_t nelements, const int nthread
  6313. ) {
  6314. if (output.size() < nelements) {
  6315. output.resize(nelements);
  6316. }
  6317. float * f32_output = (float *) output.data();
  6318. ggml_type_traits_t qtype;
  6319. if (ggml_is_quantized(tensor->type)) {
  6320. qtype = ggml_internal_get_type_traits(tensor->type);
  6321. if (qtype.to_float == NULL) {
  6322. throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
  6323. }
  6324. } else if (tensor->type != GGML_TYPE_F16) {
  6325. throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
  6326. }
  6327. if (nthread < 2) {
  6328. if (tensor->type == GGML_TYPE_F16) {
  6329. ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
  6330. } else if (ggml_is_quantized(tensor->type)) {
  6331. qtype.to_float(tensor->data, f32_output, nelements);
  6332. } else {
  6333. GGML_ASSERT(false); // unreachable
  6334. }
  6335. return;
  6336. }
  6337. auto block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
  6338. auto block_size_bytes = ggml_type_size(tensor->type);
  6339. GGML_ASSERT(nelements % block_size == 0);
  6340. auto nblocks = nelements / block_size;
  6341. auto blocks_per_thread = nblocks / nthread;
  6342. auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
  6343. for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) {
  6344. auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
  6345. auto thr_elems = thr_blocks * block_size; // number of elements for this thread
  6346. auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
  6347. auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
  6348. if (typ == GGML_TYPE_F16) {
  6349. ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
  6350. } else {
  6351. qtype.to_float(inbuf, outbuf, nels);
  6352. }
  6353. };
  6354. workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
  6355. in_buff_offs += thr_block_bytes;
  6356. out_buff_offs += thr_elems;
  6357. }
  6358. for (auto & w : workers) { w.join(); }
  6359. workers.clear();
  6360. }
  6361. static ggml_type get_k_quant_type(
  6362. quantize_state_internal & qs,
  6363. ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype
  6364. ) {
  6365. const std::string name = ggml_get_name(tensor);
  6366. // TODO: avoid hardcoded tensor names - use the TN_* constants
  6367. const llm_arch arch = qs.model.arch;
  6368. const auto tn = LLM_TN(arch);
  6369. auto use_more_bits = [](int i_layer, int num_layers) -> bool {
  6370. return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
  6371. };
  6372. if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
  6373. int nx = tensor->ne[0];
  6374. if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
  6375. new_type = GGML_TYPE_Q8_0;
  6376. }
  6377. else if (new_type != GGML_TYPE_Q8_0) {
  6378. new_type = GGML_TYPE_Q6_K;
  6379. }
  6380. } else if (name.find("attn_v.weight") != std::string::npos) {
  6381. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  6382. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  6383. new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  6384. }
  6385. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  6386. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  6387. use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
  6388. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
  6389. else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
  6390. (qs.i_attention_wv < qs.n_attention_wv/8 || qs.i_attention_wv >= 7*qs.n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
  6391. if (qs.model.type == MODEL_70B) {
  6392. // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
  6393. // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
  6394. // nearly negligible increase in model size by quantizing this tensor with more bits:
  6395. if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
  6396. }
  6397. ++qs.i_attention_wv;
  6398. } else if (name.find("ffn_down.weight") != std::string::npos) {
  6399. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  6400. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  6401. new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q5_K
  6402. : arch != LLM_ARCH_FALCON || use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q4_K
  6403. : GGML_TYPE_Q3_K;
  6404. }
  6405. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
  6406. new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
  6407. }
  6408. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
  6409. if (arch == LLM_ARCH_FALCON) {
  6410. new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q6_K :
  6411. use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  6412. } else {
  6413. if (use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
  6414. }
  6415. }
  6416. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
  6417. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && qs.i_feed_forward_w2 < 4) {
  6418. new_type = GGML_TYPE_Q5_K;
  6419. }
  6420. ++qs.i_feed_forward_w2;
  6421. } else if (name.find("attn_output.weight") != std::string::npos) {
  6422. if (arch != LLM_ARCH_FALCON) {
  6423. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
  6424. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) new_type = GGML_TYPE_Q4_K;
  6425. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  6426. } else {
  6427. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  6428. }
  6429. }
  6430. else if (name.find("attn_qkv.weight") != std::string::npos) {
  6431. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  6432. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
  6433. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
  6434. }
  6435. else if (name.find("ffn_gate.weight") != std::string::npos || name.find("ffn_up.weight") != std::string::npos) {
  6436. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  6437. }
  6438. // This can be used to reduce the size of the Q5_K_S model.
  6439. // The associated PPL increase is fully in line with the size reduction
  6440. //else {
  6441. // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
  6442. //}
  6443. bool convert_incompatible_tensor = false;
  6444. if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
  6445. new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
  6446. int nx = tensor->ne[0];
  6447. int ny = tensor->ne[1];
  6448. if (nx % QK_K != 0) {
  6449. LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
  6450. convert_incompatible_tensor = true;
  6451. } else {
  6452. ++qs.n_k_quantized;
  6453. }
  6454. }
  6455. if (convert_incompatible_tensor) {
  6456. switch (new_type) {
  6457. case GGML_TYPE_Q2_K: new_type = GGML_TYPE_Q4_0; break;
  6458. case GGML_TYPE_Q3_K: new_type = GGML_TYPE_Q4_1; break;
  6459. case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
  6460. case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
  6461. case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
  6462. default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
  6463. }
  6464. LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
  6465. ++qs.n_fallback;
  6466. }
  6467. return new_type;
  6468. }
  6469. static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
  6470. ggml_type quantized_type;
  6471. llama_ftype ftype = params->ftype;
  6472. switch (params->ftype) {
  6473. case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
  6474. case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
  6475. case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
  6476. case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
  6477. case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
  6478. case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
  6479. case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
  6480. // K-quants
  6481. case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
  6482. case LLAMA_FTYPE_MOSTLY_Q3_K_S:
  6483. case LLAMA_FTYPE_MOSTLY_Q3_K_M:
  6484. case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
  6485. case LLAMA_FTYPE_MOSTLY_Q4_K_S:
  6486. case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
  6487. case LLAMA_FTYPE_MOSTLY_Q5_K_S:
  6488. case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
  6489. case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
  6490. default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
  6491. }
  6492. int nthread = params->nthread;
  6493. if (nthread <= 0) {
  6494. nthread = std::thread::hardware_concurrency();
  6495. }
6496. // mmap consistently increases speed on Linux, and also increases speed on Windows with
6497. // a hot cache. It may cause a slowdown on macOS, possibly related to free memory.
  6498. #if defined(__linux__) || defined(_WIN32)
  6499. constexpr bool use_mmap = true;
  6500. #else
  6501. constexpr bool use_mmap = false;
  6502. #endif
  6503. llama_model_loader ml(fname_inp, use_mmap);
  6504. if (ml.use_mmap) {
  6505. ml.mapping.reset(new llama_mmap(&ml.file, /* prefetch */ 0, ggml_is_numa()));
  6506. }
  6507. llama_model model;
  6508. llm_load_arch(ml, model);
  6509. llm_load_hparams(ml, model);
  6510. struct quantize_state_internal qs(model, params);
  6511. if (params->only_copy) {
  6512. ftype = model.ftype;
  6513. }
  6514. const size_t align = GGUF_DEFAULT_ALIGNMENT;
  6515. struct gguf_context * ctx_out = gguf_init_empty();
  6516. // copy the KV pairs from the input file
  6517. gguf_set_kv (ctx_out, ml.ctx_gguf);
  6518. gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
  6519. gguf_set_val_u32(ctx_out, "general.file_type", ftype);
  6520. for (int i = 0; i < ml.n_tensors; ++i) {
  6521. struct ggml_tensor * meta = ml.get_tensor_meta(i);
  6522. const std::string name = ggml_get_name(meta);
  6523. // TODO: avoid hardcoded tensor names - use the TN_* constants
  6524. if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
  6525. ++qs.n_attention_wv;
  6526. }
  6527. else if (name.find("ffn_down.weight") != std::string::npos) {
  6528. ++qs.n_feed_forward_w2;
  6529. }
  6530. }
  6531. if (qs.n_attention_wv != qs.n_feed_forward_w2 || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) {
  6532. LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_feed_forward_w2 = %d, hparams.n_layer = %d\n",
  6533. __func__, qs.n_attention_wv, qs.n_feed_forward_w2, model.hparams.n_layer);
  6534. }
  6535. size_t total_size_org = 0;
  6536. size_t total_size_new = 0;
  6537. std::vector<int64_t> hist_all(1 << 4, 0);
  6538. std::vector<std::thread> workers;
  6539. workers.reserve(nthread);
  6540. std::mutex mutex;
  6541. int idx = 0;
  6542. std::vector<no_init<uint8_t>> read_data;
  6543. std::vector<no_init<uint8_t>> work;
  6544. std::vector<no_init<float>> f32_conv_buf;
6545. // populate the original tensors so we get the initial metadata
  6546. for (int i = 0; i < ml.n_tensors; ++i) {
  6547. struct ggml_tensor * meta = ml.get_tensor_meta(i);
  6548. gguf_add_tensor(ctx_out, meta);
  6549. }
  6550. std::ofstream fout(fname_out, std::ios::binary);
  6551. fout.exceptions(std::ofstream::failbit); // fail fast on write errors
  6552. const size_t meta_size = gguf_get_meta_size(ctx_out);
  6553. LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size);
  6554. // placeholder for the meta data
  6555. ::zeros(fout, meta_size);
  6556. for (int i = 0; i < ml.n_tensors; ++i) {
  6557. struct ggml_tensor * tensor = ml.get_tensor_meta(i);
  6558. const std::string name = ggml_get_name(tensor);
  6559. if (!ml.use_mmap) {
  6560. if (read_data.size() < ggml_nbytes(tensor)) {
  6561. read_data.resize(ggml_nbytes(tensor));
  6562. }
  6563. tensor->data = read_data.data();
  6564. }
  6565. ml.load_data_for(tensor);
  6566. LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
  6567. ++idx, ml.n_tensors,
  6568. ggml_get_name(tensor),
  6569. llama_format_tensor_shape(tensor).c_str(),
  6570. ggml_type_name(tensor->type));
  6571. // This used to be a regex, but <regex> has an extreme cost to compile times.
  6572. bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
  6573. // quantize only 2D tensors
  6574. quantize &= (tensor->n_dims == 2);
  6575. quantize &= params->quantize_output_tensor || name != "output.weight";
  6576. quantize &= !params->only_copy;
  6577. enum ggml_type new_type;
  6578. void * new_data;
  6579. size_t new_size;
  6580. if (quantize) {
  6581. new_type = quantized_type;
  6582. if (!params->pure) {
  6583. new_type = get_k_quant_type(qs, new_type, tensor, ftype);
  6584. }
  6585. // If we've decided to quantize to the same type the tensor is already
  6586. // in then there's nothing to do.
  6587. quantize = tensor->type != new_type;
  6588. }
  6589. if (!quantize) {
  6590. new_type = tensor->type;
  6591. new_data = tensor->data;
  6592. new_size = ggml_nbytes(tensor);
  6593. LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
  6594. } else {
  6595. const size_t nelements = ggml_nelements(tensor);
  6596. float * f32_data;
  6597. if (tensor->type == GGML_TYPE_F32) {
  6598. f32_data = (float *) tensor->data;
  6599. } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
  6600. throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
  6601. } else {
  6602. llama_convert_tensor_internal(tensor, f32_conv_buf, workers, nelements, nthread);
  6603. f32_data = (float *) f32_conv_buf.data();
  6604. }
  6605. LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
  6606. fflush(stdout);
  6607. if (work.size() < nelements * 4) {
  6608. work.resize(nelements * 4); // upper bound on size
  6609. }
  6610. new_data = work.data();
  6611. std::array<int64_t, 1 << 4> hist_cur = {};
  6612. static const int chunk_size = 32 * 512;
  6613. const int nchunk = (nelements + chunk_size - 1)/chunk_size;
  6614. const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
  6615. if (nthread_use < 2) {
  6616. new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
  6617. } else {
  6618. size_t counter = 0;
  6619. new_size = 0;
  6620. auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() {
  6621. std::array<int64_t, 1 << 4> local_hist = {};
  6622. size_t local_size = 0;
  6623. while (true) {
  6624. std::unique_lock<std::mutex> lock(mutex);
  6625. size_t first = counter; counter += chunk_size;
  6626. if (first >= nelements) {
  6627. if (local_size > 0) {
  6628. for (int j=0; j<int(local_hist.size()); ++j) {
  6629. hist_cur[j] += local_hist[j];
  6630. }
  6631. new_size += local_size;
  6632. }
  6633. break;
  6634. }
  6635. lock.unlock();
  6636. size_t last = std::min(nelements, first + chunk_size);
  6637. local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
  6638. }
  6639. };
  6640. for (int it = 0; it < nthread_use - 1; ++it) {
  6641. workers.emplace_back(compute);
  6642. }
  6643. compute();
  6644. for (auto & w : workers) { w.join(); }
  6645. workers.clear();
  6646. }
  6647. LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
  6648. int64_t tot_count = 0;
  6649. for (size_t i = 0; i < hist_cur.size(); i++) {
  6650. hist_all[i] += hist_cur[i];
  6651. tot_count += hist_cur[i];
  6652. }
  6653. if (tot_count > 0) {
  6654. for (size_t i = 0; i < hist_cur.size(); i++) {
  6655. LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
  6656. }
  6657. }
  6658. LLAMA_LOG_INFO("\n");
  6659. }
  6660. total_size_org += ggml_nbytes(tensor);
  6661. total_size_new += new_size;
  6662. // update the gguf meta data as we go
  6663. gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
  6664. gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
  6665. // write tensor data + padding
  6666. fout.write((const char *) new_data, new_size);
  6667. zeros(fout, GGML_PAD(new_size, align) - new_size);
  6668. }
  6669. // go back to beginning of file and write the updated meta data
  6670. {
  6671. fout.seekp(0);
  6672. std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
  6673. gguf_get_meta_data(ctx_out, data.data());
  6674. fout.write((const char *) data.data(), data.size());
  6675. }
  6676. fout.close();
  6677. gguf_free(ctx_out);
  6678. LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
  6679. LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
  6680. // print histogram for all tensors
  6681. {
  6682. int64_t sum_all = 0;
  6683. for (size_t i = 0; i < hist_all.size(); i++) {
  6684. sum_all += hist_all[i];
  6685. }
  6686. if (sum_all > 0) {
  6687. LLAMA_LOG_INFO("%s: hist: ", __func__);
  6688. for (size_t i = 0; i < hist_all.size(); i++) {
  6689. LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
  6690. }
  6691. LLAMA_LOG_INFO("\n");
  6692. }
  6693. }
  6694. if (qs.n_fallback > 0) {
  6695. LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n",
  6696. __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
  6697. }
  6698. }
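// Applies a LoRA adapter to the model weights in place:
//   W' = W + scaling * (B * A),  with  scaling = scale * lora_alpha / lora_r
// If a separate f16/f32 base model is supplied via `path_base_model`, the delta
// is added on top of the base weights and then copied into the loaded (possibly
// quantized) tensors, which preserves more precision.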
  6699. static int llama_apply_lora_from_file_internal(
  6700. const struct llama_model & model, const char * path_lora, float scale, const char * path_base_model, int n_threads
  6701. ) {
  6702. LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
  6703. const int64_t t_start_lora_us = ggml_time_us();
  6704. auto fin = std::ifstream(path_lora, std::ios::binary);
  6705. if (!fin) {
  6706. LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
  6707. return 1;
  6708. }
  6709. // verify magic and version
  6710. {
  6711. uint32_t magic;
6712. fin.read((char *) &magic, sizeof(magic));
if (magic != LLAMA_FILE_MAGIC_GGLA) {
LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
return 1;
}
  6713. uint32_t format_version;
  6714. fin.read((char *) &format_version, sizeof(format_version));
  6715. if (format_version != 1) {
  6716. LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
  6717. return 1;
  6718. }
  6719. }
  6720. int32_t lora_r;
  6721. int32_t lora_alpha;
  6722. fin.read((char *) &lora_r, sizeof(lora_r));
  6723. fin.read((char *) &lora_alpha, sizeof(lora_alpha));
  6724. float scaling = scale * (float)lora_alpha / (float)lora_r;
  6725. LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
  6726. // create a temporary ggml context to store the lora tensors
  6727. // todo: calculate size from biggest possible tensor
  6728. std::vector<uint8_t> lora_buf(1024ull * 1024ull * 1024ull);
  6729. struct ggml_init_params params;
  6730. params.mem_size = lora_buf.size();
  6731. params.mem_buffer = lora_buf.data();
  6732. params.no_alloc = false;
  6733. ggml_context * lora_ctx = ggml_init(params);
  6734. std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
  6735. // create a name -> tensor map of the model to accelerate lookups
  6736. std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
  6737. for (const auto & kv : model.tensors_by_name) {
  6738. model_tensors.insert(kv);
  6739. }
  6740. // load base model
  6741. std::unique_ptr<llama_model_loader> ml;
  6742. ggml_context * base_ctx = NULL;
  6743. std::vector<uint8_t> base_buf;
  6744. if (path_base_model) {
  6745. LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
  6746. ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
  6747. size_t ctx_size;
  6748. size_t mmapped_size;
  6749. ml->calc_sizes(ctx_size, mmapped_size);
  6750. base_buf.resize(ctx_size);
  6751. ggml_init_params base_params;
  6752. base_params.mem_size = base_buf.size();
  6753. base_params.mem_buffer = base_buf.data();
  6754. base_params.no_alloc = ml->use_mmap;
  6755. base_ctx = ggml_init(base_params);
6756. // maybe this should be in llama_model_loader
  6757. if (ml->use_mmap) {
  6758. ml->mapping.reset(new llama_mmap(&ml->file, /* prefetch */ 0, ggml_is_numa()));
  6759. }
  6760. }
  6761. // read tensors and apply
  6762. bool warned = false;
  6763. int n_tensors = 0;
  6764. std::vector<uint8_t> work_buffer;
  6765. while (true) {
  6766. int32_t n_dims;
  6767. int32_t length;
  6768. int32_t ftype;
  6769. fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
  6770. fin.read(reinterpret_cast<char *>(&length), sizeof(length));
  6771. fin.read(reinterpret_cast<char *>(&ftype), sizeof(ftype));
  6772. if (fin.eof()) {
  6773. break;
  6774. }
  6775. int32_t ne[2] = { 1, 1 };
  6776. for (int i = 0; i < n_dims; ++i) {
  6777. fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
  6778. }
  6779. std::string name;
  6780. {
  6781. char buf[1024];
  6782. fin.read(buf, length);
  6783. name = std::string(buf, length);
  6784. }
  6785. // check for lora suffix and get the type of tensor
  6786. const std::string lora_suffix = ".lora";
  6787. size_t pos = name.rfind(lora_suffix);
  6788. if (pos == std::string::npos) {
  6789. LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
  6790. return 1;
  6791. }
  6792. std::string lora_type = name.substr(pos + lora_suffix.length());
  6793. std::string base_name = name;
  6794. base_name.erase(pos);
  6795. // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
  6796. if (model_tensors.find(base_name) == model_tensors.end()) {
  6797. LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
  6798. return 1;
  6799. }
  6800. // create ggml tensor
  6801. ggml_type wtype;
  6802. switch (ftype) {
  6803. case 0: wtype = GGML_TYPE_F32; break;
  6804. case 1: wtype = GGML_TYPE_F16; break;
  6805. default:
  6806. {
  6807. LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
  6808. __func__, ftype);
  6809. return false;
  6810. }
  6811. }
  6812. ggml_tensor * lora_tensor;
  6813. if (n_dims == 2) {
  6814. lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
  6815. }
  6816. else {
  6817. LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
  6818. return 1;
  6819. }
  6820. ggml_set_name(lora_tensor, "lora_tensor");
  6821. // load tensor data
  6822. size_t offset = fin.tellg();
  6823. size_t tensor_data_size = ggml_nbytes(lora_tensor);
  6824. offset = (offset + 31) & -32;
  6825. fin.seekg(offset);
  6826. fin.read((char*)lora_tensor->data, tensor_data_size);
  6827. lora_tensors[name] = lora_tensor;
  6828. // check if we have both A and B tensors and apply
  6829. if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
  6830. lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
  6831. ggml_tensor * dest_t = model_tensors[base_name];
  6832. offload_func_t offload_func = ggml_offload_nop;
  6833. offload_func_t offload_func_force_inplace = ggml_offload_nop;
  6834. #ifdef GGML_USE_CUBLAS
  6835. if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
  6836. if (dest_t->type != GGML_TYPE_F16) {
  6837. throw std::runtime_error(format(
  6838. "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models. dest_t->type: %d", __func__, dest_t->type));
  6839. }
  6840. offload_func = ggml_cuda_assign_buffers;
  6841. offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
  6842. }
  6843. #endif // GGML_USE_CUBLAS
  6844. ggml_tensor * base_t;
  6845. if (ml) {
  6846. struct gguf_context * ctx_gguf = ml->ctx_gguf;
  6847. // load from base model
  6848. if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) {
  6849. // TODO: throw
  6850. LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
  6851. return 1;
  6852. }
  6853. // TODO: not tested!! maybe not working!
  6854. base_t = ml->create_tensor(base_ctx, base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
  6855. ml->load_data_for(base_t);
  6856. } else {
  6857. base_t = dest_t;
  6858. }
  6859. if (ggml_is_quantized(base_t->type)) {
  6860. if (!warned) {
  6861. LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
  6862. "use a f16 or f32 base model with --lora-base\n", __func__);
  6863. warned = true;
  6864. }
  6865. }
  6866. ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
  6867. GGML_ASSERT(loraA->type == GGML_TYPE_F32);
  6868. ggml_set_name(loraA, "loraA");
  6869. ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
  6870. GGML_ASSERT(loraB->type == GGML_TYPE_F32);
  6871. ggml_set_name(loraB, "loraB");
  6872. if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
  6873. LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
  6874. " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
  6875. return 1;
  6876. }
  6877. // w = w + BA*s
  6878. ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
  6879. offload_func(BA);
  6880. ggml_set_name(BA, "BA");
  6881. if (scaling != 1.0f) {
  6882. ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
  6883. ggml_set_name(scale_tensor, "scale_tensor");
  6884. BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor);
  6885. offload_func(BA);
  6886. ggml_set_name(BA, "BA_scaled");
  6887. }
  6888. ggml_tensor * r;
  6889. if (base_t == dest_t) {
  6890. r = ggml_add_inplace(lora_ctx, dest_t, BA);
  6891. offload_func_force_inplace(r);
  6892. ggml_set_name(r, "r_add_inplace");
  6893. }
  6894. else {
  6895. r = ggml_add(lora_ctx, base_t, BA);
  6896. offload_func(r);
  6897. ggml_set_name(r, "r_add");
  6898. r = ggml_cpy(lora_ctx, r, dest_t);
  6899. offload_func(r);
  6900. ggml_set_name(r, "r_cpy");
  6901. }
  6902. struct ggml_cgraph * gf = ggml_new_graph(lora_ctx);
  6903. ggml_build_forward_expand(gf, r);
  6904. ggml_graph_compute_helper(work_buffer, gf, n_threads);
  6905. // we won't need these tensors again, reset the context to save memory
  6906. ggml_free(lora_ctx);
  6907. lora_ctx = ggml_init(params);
  6908. lora_tensors.clear();
  6909. n_tensors++;
  6910. if (n_tensors % 4 == 0) {
  6911. LLAMA_LOG_INFO(".");
  6912. }
  6913. }
  6914. }
  6915. // TODO: this should be in a destructor, it will leak on failure
  6916. ggml_free(lora_ctx);
  6917. if (base_ctx) {
  6918. ggml_free(base_ctx);
  6919. }
  6920. const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
  6921. LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
  6922. return 0;
  6923. }
  6924. //
  6925. // interface implementation
  6926. //
  6927. struct llama_model_params llama_model_default_params() {
  6928. struct llama_model_params result = {
  6929. /*.n_gpu_layers =*/ 0,
  6930. /*.main_gpu =*/ 0,
  6931. /*.tensor_split =*/ nullptr,
  6932. /*.progress_callback =*/ nullptr,
  6933. /*.progress_callback_user_data =*/ nullptr,
  6934. /*.vocab_only =*/ false,
  6935. /*.use_mmap =*/ true,
  6936. /*.use_mlock =*/ false,
  6937. };
  6938. #ifdef GGML_USE_METAL
  6939. result.n_gpu_layers = 1;
  6940. #endif
  6941. return result;
  6942. }
  6943. struct llama_context_params llama_context_default_params() {
  6944. struct llama_context_params result = {
  6945. /*.seed =*/ LLAMA_DEFAULT_SEED,
  6946. /*.n_ctx =*/ 512,
  6947. /*.n_batch =*/ 512,
  6948. /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
  6949. /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
  6950. /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_UNSPECIFIED,
  6951. /*.rope_freq_base =*/ 0.0f,
  6952. /*.rope_freq_scale =*/ 0.0f,
  6953. /*.yarn_ext_factor =*/ -1.0f,
  6954. /*.yarn_attn_factor =*/ 1.0f,
  6955. /*.yarn_beta_fast =*/ 32.0f,
  6956. /*.yarn_beta_slow =*/ 1.0f,
  6957. /*.yarn_orig_ctx =*/ 0,
  6958. /*.mul_mat_q =*/ true,
  6959. /*.f16_kv =*/ true,
  6960. /*.logits_all =*/ false,
  6961. /*.embedding =*/ false,
  6962. };
  6963. return result;
  6964. }
  6965. struct llama_model_quantize_params llama_model_quantize_default_params() {
  6966. struct llama_model_quantize_params result = {
  6967. /*.nthread =*/ 0,
  6968. /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
  6969. /*.allow_requantize =*/ false,
  6970. /*.quantize_output_tensor =*/ true,
  6971. /*.only_copy =*/ false,
  6972. /*.pure =*/ false,
  6973. };
  6974. return result;
  6975. }
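// Illustrative quantization usage (sketch only, assuming the public
// llama_model_quantize() wrapper from llama.h, which forwards to
// llama_model_quantize_internal() above; file names are examples):
//
//   llama_model_quantize_params qparams = llama_model_quantize_default_params();
//   qparams.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M;
//   qparams.nthread = 8;
//   llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams);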
  6976. int llama_max_devices(void) {
  6977. return LLAMA_MAX_DEVICES;
  6978. }
  6979. bool llama_mmap_supported(void) {
  6980. return llama_mmap::SUPPORTED;
  6981. }
  6982. bool llama_mlock_supported(void) {
  6983. return llama_mlock::SUPPORTED;
  6984. }
  6985. void llama_backend_init(bool numa) {
  6986. ggml_time_init();
  6987. // needed to initialize f16 tables
  6988. {
  6989. struct ggml_init_params params = { 0, NULL, false };
  6990. struct ggml_context * ctx = ggml_init(params);
  6991. ggml_free(ctx);
  6992. }
  6993. if (numa) {
  6994. ggml_numa_init();
  6995. }
  6996. #ifdef GGML_USE_MPI
  6997. ggml_mpi_backend_init();
  6998. #endif
  6999. }
  7000. void llama_backend_free(void) {
  7001. #ifdef GGML_USE_MPI
  7002. ggml_mpi_backend_free();
  7003. #endif
  7004. }
  7005. int64_t llama_time_us(void) {
  7006. return ggml_time_us();
  7007. }
  7008. struct llama_model * llama_load_model_from_file(
  7009. const char * path_model,
  7010. struct llama_model_params params) {
  7011. ggml_time_init();
  7012. llama_model * model = new llama_model;
  7013. unsigned cur_percentage = 0;
  7014. if (params.progress_callback == NULL) {
  7015. params.progress_callback_user_data = &cur_percentage;
  7016. params.progress_callback = [](float progress, void * ctx) {
  7017. unsigned * cur_percentage_p = (unsigned *) ctx;
  7018. unsigned percentage = (unsigned) (100 * progress);
  7019. while (percentage > *cur_percentage_p) {
  7020. *cur_percentage_p = percentage;
  7021. LLAMA_LOG_INFO(".");
  7022. if (percentage >= 100) {
  7023. LLAMA_LOG_INFO("\n");
  7024. }
  7025. }
  7026. };
  7027. }
  7028. if (!llama_model_load(path_model, *model, params)) {
  7029. LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
  7030. delete model;
  7031. return nullptr;
  7032. }
  7033. return model;
  7034. }
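// Illustrative model/context setup (sketch only, not part of the build; the
// file name and n_ctx value are examples):
//
//   llama_backend_init(/*numa*/ false);
//   llama_model_params mparams = llama_model_default_params();
//   llama_model * model = llama_load_model_from_file("model.gguf", mparams);
//   llama_context_params cparams = llama_context_default_params();
//   cparams.n_ctx = 2048;
//   llama_context * lctx = llama_new_context_with_model(model, cparams);
//   // ... evaluate and sample ...
//   llama_free(lctx);
//   llama_free_model(model);
//   llama_backend_free();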
  7035. void llama_free_model(struct llama_model * model) {
  7036. delete model;
  7037. }

struct llama_context * llama_new_context_with_model(
        struct llama_model * model,
        struct llama_context_params params) {
    if (!model) {
        return nullptr;
    }

    llama_context * ctx = new llama_context(*model);

    const auto & hparams = model->hparams;
    auto       & cparams = ctx->cparams;

    cparams.n_batch          = params.n_batch;
    cparams.n_threads        = params.n_threads;
    cparams.n_threads_batch  = params.n_threads_batch;
    cparams.yarn_ext_factor  = params.yarn_ext_factor;
    cparams.yarn_attn_factor = params.yarn_attn_factor;
    cparams.yarn_beta_fast   = params.yarn_beta_fast;
    cparams.yarn_beta_slow   = params.yarn_beta_slow;
    cparams.mul_mat_q        = params.mul_mat_q;

    cparams.n_ctx           = params.n_ctx           == 0    ? hparams.n_ctx_train           : params.n_ctx;
    cparams.rope_freq_base  = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
    cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;

    cparams.n_yarn_orig_ctx = params.yarn_orig_ctx    != 0 ? params.yarn_orig_ctx    :
                              hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
                                                             hparams.n_ctx_train;

    auto rope_scaling_type = params.rope_scaling_type;
    if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) {
        rope_scaling_type = hparams.rope_scaling_type_train;
    }

    if (rope_scaling_type == LLAMA_ROPE_SCALING_NONE) {
        cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
    }

    if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
        cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 1.0f : 0.0f;
    }

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
    LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
    LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);

    ctx->rng = std::mt19937(params.seed);
    ctx->logits_all = params.logits_all;

    ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

    // reserve memory for context buffers
    if (!hparams.vocab_only) {
        if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, cparams.n_ctx, model->n_gpu_layers)) {
            LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
            llama_free(ctx);
            return nullptr;
        }

        {
            const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
            LLAMA_LOG_INFO("%s: kv self size = %7.2f MiB\n", __func__, memory_size / 1024.0 / 1024.0);
        }

        // resized during inference
        if (params.logits_all) {
            ctx->logits.reserve(cparams.n_ctx*hparams.n_vocab);
        } else {
            ctx->logits.reserve(hparams.n_vocab);
        }

        if (params.embedding){
            ctx->embedding.resize(hparams.n_embd);
        }

        {
            static const size_t tensor_alignment = 32;
            // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
            ctx->buf_compute.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead());

            // create measure allocator
            ctx->alloc = ggml_allocr_new_measure(tensor_alignment);

            // build worst-case graph
            int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch);
            int n_past = cparams.n_ctx - n_tokens;
            llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
            ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0));

#ifdef GGML_USE_METAL
            if (model->n_gpu_layers > 0) {
                ggml_metal_log_set_callback(llama_log_callback_default, NULL);

                ctx->ctx_metal = ggml_metal_init(1);
                if (!ctx->ctx_metal) {
                    LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
                    llama_free(ctx);
                    return NULL;
                }
                //ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false);
                //ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
            }
#endif
            // measure memory requirements for the graph
            size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;

            LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MiB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);

            // recreate allocator with exact memory requirements
            ggml_allocr_free(ctx->alloc);

            ctx->buf_alloc.resize(alloc_size);
            ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment);
#ifdef GGML_USE_METAL
            if (ctx->ctx_metal) {
                //ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
            }
#endif
#ifdef GGML_USE_CUBLAS
            ggml_cuda_set_scratch_size(alloc_size);
            LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MiB\n", __func__, alloc_size / 1024.0 / 1024.0);

            // calculate total VRAM usage
            auto add_tensor = [](const ggml_tensor * t, size_t & size) {
                if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) {
                    size += ggml_nbytes(t);
                }
            };
            size_t model_vram_size = 0;
            for (const auto & kv : model->tensors_by_name) {
                add_tensor(kv.second, model_vram_size);
            }

            size_t kv_vram_size = 0;
            add_tensor(ctx->kv_self.k, kv_vram_size);
            add_tensor(ctx->kv_self.v, kv_vram_size);

            size_t ctx_vram_size = alloc_size + kv_vram_size;
            size_t total_vram_size = model_vram_size + ctx_vram_size;

            LLAMA_LOG_INFO("%s: total VRAM used: %.2f MiB (model: %.2f MiB, context: %.2f MiB)\n", __func__,
                    total_vram_size / 1024.0 / 1024.0,
                    model_vram_size / 1024.0 / 1024.0,
                    ctx_vram_size / 1024.0 / 1024.0);
#endif
        }

#ifdef GGML_USE_METAL
        if (model->n_gpu_layers > 0) {
            // this allocates all Metal resources and memory buffers

            void * data_ptr  = NULL;
            size_t data_size = 0;

            if (ctx->model.mapping) {
                data_ptr  = ctx->model.mapping->addr;
                data_size = ctx->model.mapping->size;
            } else {
                data_ptr  = ggml_get_mem_buffer(ctx->model.ctx);
                data_size = ggml_get_mem_size  (ctx->model.ctx);
            }

            const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);

            LLAMA_LOG_INFO("%s: max tensor size = %8.2f MiB\n", __func__, max_size/1024.0/1024.0);

#define LLAMA_METAL_CHECK_BUF(result) \
            if (!(result)) { \
                LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
                llama_free(ctx); \
                return NULL; \
            }

            LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
            LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0));
            LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.data, ctx->buf_alloc.size, 0));
#undef LLAMA_METAL_CHECK_BUF
        }
#endif
    }

#ifdef GGML_USE_MPI
    ctx->ctx_mpi = ggml_mpi_init();

    if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
        // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
        // TODO: needs fix after #3228
        GGML_ASSERT(false && "not implemented");
        //const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
        //while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
        llama_backend_free();
        exit(1);
    }
#endif

    return ctx;
}
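
// Usage sketch (illustrative only, not part of the library): the minimal lifecycle around
// llama_load_model_from_file() and llama_new_context_with_model() above, assuming the
// default-params helpers declared in the public header (llama_model_default_params /
// llama_context_default_params) and a hypothetical "model.gguf" path. Error handling is
// reduced to null checks.
//
//     llama_backend_init(/*numa =*/ false);
//
//     struct llama_model_params mparams = llama_model_default_params();
//     struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
//     if (model == NULL) { /* handle load failure */ }
//
//     struct llama_context_params cparams = llama_context_default_params();
//     cparams.n_ctx = 2048;
//     struct llama_context * lctx = llama_new_context_with_model(model, cparams);
//     if (lctx == NULL) { /* handle context failure */ }
//
//     // ... tokenize, llama_decode(), sample ...
//
//     llama_free(lctx);
//     llama_free_model(model);
//     llama_backend_free();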

void llama_free(struct llama_context * ctx) {
    delete ctx;
}

const llama_model * llama_get_model(const struct llama_context * ctx) {
    return &ctx->model;
}

int llama_n_ctx(const struct llama_context * ctx) {
    return ctx->cparams.n_ctx;
}

enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
    return model->vocab.type;
}

int llama_n_vocab(const struct llama_model * model) {
    return model->vocab.id_to_token.size();
}

int llama_n_ctx_train(const struct llama_model * model) {
    return model->hparams.n_ctx_train;
}

int llama_n_embd(const struct llama_model * model) {
    return model->hparams.n_embd;
}

float llama_rope_freq_scale_train(const struct llama_model * model) {
    return model->hparams.rope_freq_scale_train;
}

int llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
    const auto & it = model->gguf_kv.find(key);
    if (it == model->gguf_kv.end()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}

int llama_model_meta_count(const struct llama_model * model) {
    return (int)model->gguf_kv.size();
}

int llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->first.c_str());
}

int llama_model_meta_val_str_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}
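
// Usage sketch (illustrative only): enumerating the GGUF metadata through the accessors
// above. A negative return signals a missing key or out-of-range index; otherwise the
// return value follows snprintf semantics (length that would have been written).
//
//     const int n_meta = llama_model_meta_count(model);
//     for (int i = 0; i < n_meta; i++) {
//         char key[256];
//         char val[256];
//         if (llama_model_meta_key_by_index(model, i, key, sizeof(key)) < 0) continue;
//         if (llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) < 0) continue;
//         printf("%s = %s\n", key, val);
//     }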

int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
    return snprintf(buf, buf_size, "%s %s %s",
            llama_model_arch_name(model->arch).c_str(),
            llama_model_type_name(model->type),
            llama_model_ftype_name(model->ftype).c_str());
}

uint64_t llama_model_size(const struct llama_model * model) {
    uint64_t size = 0;
    for (const auto & it : model->tensors_by_name) {
        size += ggml_nbytes(it.second);
    }
    return size;
}

uint64_t llama_model_n_params(const struct llama_model * model) {
    uint64_t nparams = 0;
    for (const auto & it : model->tensors_by_name) {
        nparams += ggml_nelements(it.second);
    }
    return nparams;
}

struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
    return ggml_get_tensor(model->ctx, name);
}

int llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        const llama_model_quantize_params * params) {
    try {
        llama_model_quantize_internal(fname_inp, fname_out, params);
        return 0;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
        return 1;
    }
}

int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, float scale, const char * path_base_model, int n_threads) {
    try {
        return llama_apply_lora_from_file_internal(ctx->model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, float scale, const char * path_base_model, int n_threads) {
    try {
        return llama_apply_lora_from_file_internal(*model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
    return ctx->kv_self.head;
}

void llama_kv_cache_clear(struct llama_context * ctx) {
    llama_kv_cache_clear(ctx->kv_self);
}

void llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
}

void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }
    llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
}

void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
    llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
}

void llama_kv_cache_seq_shift(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
    llama_kv_cache_seq_shift(ctx->kv_self, seq_id, p0, p1, delta);
}
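
// Usage sketch (illustrative only): a simple "context shift" built from the wrappers
// above - discard the oldest n_discard positions of sequence 0 after the first n_keep
// tokens, then slide the remainder back so generation can continue past the context
// limit. This assumes a negative p1 is treated by the cache as "to the end of the
// sequence"; the values are arbitrary.
//
//     const llama_seq_id seq = 0;
//     const llama_pos n_keep = 4;
//     const llama_pos n_discard = 256;
//
//     llama_kv_cache_seq_rm(ctx, seq, n_keep, n_keep + n_discard);
//     llama_kv_cache_seq_shift(ctx, seq, n_keep + n_discard, -1, -n_discard);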

// Returns the *maximum* size of the state
size_t llama_get_state_size(const struct llama_context * ctx) {
    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size        = sizeof(size_t);
    const size_t s_rng             = LLAMA_MAX_RNG_STATE;
    const size_t s_logits_capacity = sizeof(size_t);
    const size_t s_logits_size     = sizeof(size_t);
    const size_t s_logits          = ctx->logits.capacity() * sizeof(float);
    const size_t s_embedding_size  = sizeof(size_t);
    const size_t s_embedding       = ctx->embedding.size() * sizeof(float);
    const size_t s_kv_size         = sizeof(size_t);
    const size_t s_kv_ntok         = sizeof(int);
    const size_t s_kv              = ctx->kv_self.buf.size;

    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_logits_capacity
        + s_logits_size
        + s_logits
        + s_embedding_size
        + s_embedding
        + s_kv_size
        + s_kv_ntok
        + s_kv
    );

    return s_total;
}
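
// Usage sketch (illustrative only): round-tripping the context state through a
// caller-owned buffer. llama_get_state_size() is an upper bound; llama_copy_state_data()
// returns the number of bytes actually written.
//
//     const size_t n_state_max = llama_get_state_size(ctx);
//     std::vector<uint8_t> state(n_state_max);
//
//     const size_t n_written = llama_copy_state_data(ctx, state.data());
//
//     // later, on a context created from the same model with the same parameters:
//     const size_t n_read = llama_set_state_data(ctx, state.data());
//     // n_read <= n_state_max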

// llama_data_context
struct llama_data_context {
    virtual void write(const void * src, size_t size) = 0;
    virtual size_t get_size_written() = 0;
    virtual ~llama_data_context() = default;
};

struct llama_data_buffer_context : llama_data_context {
    uint8_t * ptr;
    size_t size_written = 0;

    llama_data_buffer_context(uint8_t * p) : ptr(p) {}

    void write(const void * src, size_t size) override {
        memcpy(ptr, src, size);
        ptr += size;
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

struct llama_data_file_context : llama_data_context {
    llama_file * file;
    size_t size_written = 0;

    llama_data_file_context(llama_file * f) : file(f) {}

    void write(const void * src, size_t size) override {
        file->write_raw(src, size);
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

/** copy state data into either a buffer or file depending on the passed in context
 *
 * file context:
 *   llama_file file("/path", "wb");
 *   llama_data_file_context data_ctx(&file);
 *   llama_copy_state_data_internal(ctx, &data_ctx);
 *
 * buffer context:
 *   std::vector<uint8_t> buf(max_size, 0);
 *   llama_data_buffer_context data_ctx(buf.data());
 *   llama_copy_state_data_internal(ctx, &data_ctx);
 *
*/
static void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
    // copy rng
    {
        std::stringstream rng_ss;
        rng_ss << ctx->rng;

        const size_t rng_size = rng_ss.str().size();
        char rng_buf[LLAMA_MAX_RNG_STATE];

        memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
        memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());

        data_ctx->write(&rng_size, sizeof(rng_size));
        data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE);
    }

    // copy logits
    {
        const size_t logits_cap  = ctx->logits.capacity();
        const size_t logits_size = ctx->logits.size();

        data_ctx->write(&logits_cap, sizeof(logits_cap));
        data_ctx->write(&logits_size, sizeof(logits_size));

        if (logits_size) {
            data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
        }

        // If there is a gap between the size and the capacity, write padding
        size_t padding_size = (logits_cap - logits_size) * sizeof(float);
        if (padding_size > 0) {
            std::vector<uint8_t> padding(padding_size, 0); // Create a buffer filled with zeros
            data_ctx->write(padding.data(), padding_size);
        }
    }

    // copy embeddings
    {
        const size_t embedding_size = ctx->embedding.size();

        data_ctx->write(&embedding_size, sizeof(embedding_size));

        if (embedding_size) {
            data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
        }
    }

    // copy kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const auto & cparams = ctx->cparams;

        const auto n_layer = hparams.n_layer;
        const auto n_embd  = hparams.n_embd_gqa();
        const auto n_ctx   = cparams.n_ctx;

        const size_t   kv_buf_size = kv_self.buf.size;
        const uint32_t kv_head     = kv_self.head;
        const uint32_t kv_size     = kv_self.size;

        data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
        data_ctx->write(&kv_head, sizeof(kv_head));
        data_ctx->write(&kv_size, sizeof(kv_size));

        if (kv_buf_size) {
            const size_t elt_size = ggml_element_size(kv_self.k);

            ggml_context * cpy_ctx = ggml_init({ 6*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
            ggml_cgraph * gf = ggml_new_graph(cpy_ctx);

            ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
            std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
            kout3d->data = kout3d_data.data();

            ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_head, n_embd, n_layer);
            std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
            vout3d->data = vout3d_data.data();

            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                n_embd, kv_head, n_layer,
                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
                kv_head, n_embd, n_layer,
                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

            ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k3d, kout3d));
            ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v3d, vout3d));
            ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1);

            ggml_free(cpy_ctx);

            // our data is now in the kout3d_data and vout3d_data buffers
            // write them to file
            data_ctx->write(kout3d_data.data(), kout3d_data.size());
            data_ctx->write(vout3d_data.data(), vout3d_data.size());
        }

        for (uint32_t i = 0; i < kv_size; ++i) {
            const auto & cell = kv_self.cells[i];

            const llama_pos pos      = cell.pos;
            const size_t seq_id_size = cell.seq_id.size();

            data_ctx->write(&pos, sizeof(pos));
            data_ctx->write(&seq_id_size, sizeof(seq_id_size));

            for (auto seq_id : cell.seq_id) {
                data_ctx->write(&seq_id, sizeof(seq_id));
            }
        }
    }
}

size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
    llama_data_buffer_context data_ctx(dst);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return data_ctx.get_size_written();
}

// Sets the state reading from the specified source address
size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
    uint8_t * inp = src;

    // set rng
    {
        size_t rng_size;
        char rng_buf[LLAMA_MAX_RNG_STATE];

        memcpy(&rng_size,   inp, sizeof(rng_size));    inp += sizeof(rng_size);
        memcpy(&rng_buf[0], inp, LLAMA_MAX_RNG_STATE); inp += LLAMA_MAX_RNG_STATE;

        std::stringstream rng_ss;
        rng_ss.str(std::string(&rng_buf[0], rng_size));
        rng_ss >> ctx->rng;

        GGML_ASSERT(!rng_ss.fail());
    }

    // set logits
    {
        size_t logits_cap;
        size_t logits_size;

        memcpy(&logits_cap,  inp, sizeof(logits_cap));  inp += sizeof(logits_cap);
        memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);

        GGML_ASSERT(ctx->logits.capacity() == logits_cap);

        if (logits_size) {
            ctx->logits.resize(logits_size);
            memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
        }

        inp += logits_cap * sizeof(float);
    }

    // set embeddings
    {
        size_t embedding_size;

        memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);

        GGML_ASSERT(ctx->embedding.capacity() == embedding_size);

        if (embedding_size) {
            memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
            inp += embedding_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const auto & cparams = ctx->cparams;

        const int n_layer = hparams.n_layer;
        const int n_embd  = hparams.n_embd_gqa();
        const int n_ctx   = cparams.n_ctx;

        size_t   kv_buf_size;
        uint32_t kv_head;
        uint32_t kv_size;

        memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
        memcpy(&kv_head,     inp, sizeof(kv_head));     inp += sizeof(kv_head);
        memcpy(&kv_size,     inp, sizeof(kv_size));     inp += sizeof(kv_size);

        if (kv_buf_size) {
            GGML_ASSERT(kv_self.buf.size == kv_buf_size);

            const size_t elt_size = ggml_element_size(kv_self.k);

            ggml_context * cpy_ctx = ggml_init({ 6*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
            ggml_cgraph * gf = ggml_new_graph(cpy_ctx);

            ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
            kin3d->data = (void *) inp;
            inp += ggml_nbytes(kin3d);

            ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_head, n_embd, n_layer);
            vin3d->data = (void *) inp;
            inp += ggml_nbytes(vin3d);

            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                n_embd, kv_head, n_layer,
                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
                kv_head, n_embd, n_layer,
                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

            ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin3d, k3d));
            ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin3d, v3d));
            ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1);

            ggml_free(cpy_ctx);
        }

        ctx->kv_self.head = kv_head;
        ctx->kv_self.size = kv_size;

        ctx->kv_self.cells.resize(kv_size);

        for (uint32_t i = 0; i < kv_size; ++i) {
            llama_pos pos;
            size_t seq_id_size;

            memcpy(&pos,         inp, sizeof(pos));         inp += sizeof(pos);
            memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);

            ctx->kv_self.cells[i].pos = pos;

            llama_seq_id seq_id;

            for (size_t j = 0; j < seq_id_size; ++j) {
                memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
                ctx->kv_self.cells[i].seq_id.insert(seq_id);
            }
        }
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_get_state_size(ctx);

    GGML_ASSERT(nread <= max_size);

    return nread;
}

static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_get_state_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_set_state_data(ctx, state_data.data());
    }

    return true;
}

bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
        return false;
    }
}

bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return true;
}
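
// Usage sketch (illustrative only): persisting and reloading a prompt plus the matching
// context state with the two public wrappers above; "prompt.session" is an arbitrary path.
// The token buffer capacity must be at least the number of tokens that were saved.
//
//     // 'tokens' holds the prompt tokens already evaluated in ctx
//     llama_save_session_file(ctx, "prompt.session", tokens.data(), tokens.size());
//
//     // later, or in another run, with a context built from the same model:
//     std::vector<llama_token> loaded(4096);
//     size_t n_loaded = 0;
//     if (llama_load_session_file(ctx, "prompt.session", loaded.data(), loaded.size(), &n_loaded)) {
//         loaded.resize(n_loaded);
//         // the KV cache now matches these tokens; continue decoding from position n_loaded
//     }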

int llama_eval(
        struct llama_context * ctx,
        llama_token * tokens,
        int32_t n_tokens,
        int n_past) {
    llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);

    const int ret = llama_decode_internal(*ctx, llama_batch_get_one(tokens, n_tokens, n_past, 0));
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

int llama_eval_embd(
        struct llama_context * ctx,
        float * embd,
        int32_t n_tokens,
        int n_past) {
    llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);

    llama_batch batch = { n_tokens, nullptr, embd, nullptr, nullptr, nullptr, nullptr, n_past, 1, 0, };

    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch) {
    ctx->cparams.n_threads = n_threads;
    ctx->cparams.n_threads_batch = n_threads_batch;
}

struct llama_batch llama_batch_get_one(
        llama_token * tokens,
        int32_t n_tokens,
        llama_pos pos_0,
        llama_seq_id seq_id) {
    return {
        /*n_tokens   =*/ n_tokens,
        /*tokens     =*/ tokens,
        /*embd       =*/ nullptr,
        /*pos        =*/ nullptr,
        /*n_seq_id   =*/ nullptr,
        /*seq_id     =*/ nullptr,
        /*logits     =*/ nullptr,
        /*all_pos_0  =*/ pos_0,
        /*all_pos_1  =*/ 1,
        /*all_seq_id =*/ seq_id,
    };
}

struct llama_batch llama_batch_init(int32_t n_tokens, int32_t embd, int32_t n_seq_max) {
    llama_batch batch = { 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, 0, 0, };

    if (embd) {
        batch.embd = (float *) malloc(sizeof(float) * n_tokens * embd);
    } else {
        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens);
    }

    batch.pos      = (llama_pos *)     malloc(sizeof(llama_pos)      * n_tokens);
    batch.n_seq_id = (int32_t *)       malloc(sizeof(int32_t)        * n_tokens);
    batch.seq_id   = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * n_tokens);
    for (int i = 0; i < n_tokens; ++i) {
        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
    }
    batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens);

    return batch;
}

void llama_batch_free(struct llama_batch batch) {
    if (batch.token)    free(batch.token);
    if (batch.embd)     free(batch.embd);
    if (batch.pos)      free(batch.pos);
    if (batch.n_seq_id) free(batch.n_seq_id);
    if (batch.seq_id) {
        for (int i = 0; i < batch.n_tokens; ++i) {
            free(batch.seq_id[i]);
        }
        free(batch.seq_id);
    }
    if (batch.logits)   free(batch.logits);
}
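
// Usage sketch (illustrative only): filling a heap-allocated batch for a short prompt on
// sequence 0, requesting logits only for the last token, then releasing it. Field
// meanings follow llama_batch_init()/llama_batch_free() above; a real prompt would come
// from llama_tokenize().
//
//     std::vector<llama_token> prompt = { llama_token_bos(model) };
//
//     llama_batch batch = llama_batch_init(/*n_tokens =*/ (int32_t) prompt.size(), /*embd =*/ 0, /*n_seq_max =*/ 1);
//     batch.n_tokens = (int32_t) prompt.size();
//     for (int32_t i = 0; i < batch.n_tokens; ++i) {
//         batch.token[i]     = prompt[i];
//         batch.pos[i]       = i;
//         batch.n_seq_id[i]  = 1;
//         batch.seq_id[i][0] = 0;
//         batch.logits[i]    = (i == batch.n_tokens - 1);
//     }
//
//     if (llama_decode(ctx, batch) != 0) { /* handle error */ }
//     llama_batch_free(batch);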

int llama_decode(
        struct llama_context * ctx,
        struct llama_batch batch) {
    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

float * llama_get_logits(struct llama_context * ctx) {
    return ctx->logits.data();
}

float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
    return ctx->logits.data() + i*ctx->model.hparams.n_vocab;
}

float * llama_get_embeddings(struct llama_context * ctx) {
    return ctx->embedding.data();
}
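
// Usage sketch (illustrative only, continuing the batch sketch above): picking the most
// likely next token from the logits of the last batch entry, assuming that entry had
// logits[i] set to true when it was decoded.
//
//     const int n_vocab = llama_n_vocab(llama_get_model(ctx));
//     const float * logits = llama_get_logits_ith(ctx, batch.n_tokens - 1);
//
//     llama_token best = 0;
//     for (llama_token t = 1; t < n_vocab; ++t) {
//         if (logits[t] > logits[best]) {
//             best = t;
//         }
//     }
//     // 'best' is the greedy continuation; feed it back via llama_batch_get_one() or a new batch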

const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].text.c_str();
}

float llama_token_get_score(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].score;
}

llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].type;
}

llama_token llama_token_bos(const struct llama_model * model) {
    return model->vocab.special_bos_id;
}

llama_token llama_token_eos(const struct llama_model * model) {
    return model->vocab.special_eos_id;
}

llama_token llama_token_nl(const struct llama_model * model) {
    return model->vocab.linefeed_id;
}

int llama_add_bos_token(const struct llama_model * model) {
    return model->vocab.special_add_bos;
}

int llama_add_eos_token(const struct llama_model * model) {
    return model->vocab.special_add_eos;
}

llama_token llama_token_prefix(const struct llama_model * model) {
    return model->vocab.special_prefix_id;
}

llama_token llama_token_middle(const struct llama_model * model) {
    return model->vocab.special_middle_id;
}

llama_token llama_token_suffix(const struct llama_model * model) {
    return model->vocab.special_suffix_id;
}

llama_token llama_token_eot(const struct llama_model * model) {
    return model->vocab.special_eot_id;
}

int llama_tokenize(
        const struct llama_model * model,
        const char * text,
        int text_len,
        llama_token * tokens,
        int n_max_tokens,
        bool add_bos,
        bool special) {
    auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_bos, special);

    if (n_max_tokens < (int) res.size()) {
        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}
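
// Usage sketch (illustrative only): the negative return value above is the required
// token count, so callers can size the output buffer on a second pass.
//
//     std::string text = "Hello world";
//     std::vector<llama_token> tokens(text.size() + 1); // rough first guess
//
//     int n = llama_tokenize(model, text.c_str(), (int) text.size(),
//                            tokens.data(), (int) tokens.size(),
//                            /*add_bos =*/ true, /*special =*/ false);
//     if (n < 0) {
//         tokens.resize(-n);
//         n = llama_tokenize(model, text.c_str(), (int) text.size(),
//                            tokens.data(), (int) tokens.size(),
//                            /*add_bos =*/ true, /*special =*/ false);
//     }
//     tokens.resize(n);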

static std::string llama_decode_text(const std::string & text) {
    std::string decoded_text;
    auto unicode_sequences = codepoints_from_utf8(text);
    for (auto & unicode_sequence : unicode_sequences) {
        decoded_text += unicode_to_bytes_bpe(codepoint_to_utf8(unicode_sequence));
    }

    return decoded_text;
}

// does not write null-terminator to buf
int llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int length) {
    if (0 <= token && token < llama_n_vocab(model)) {
        switch (llama_vocab_get_type(model->vocab)) {
        case LLAMA_VOCAB_TYPE_SPM: {
            if (llama_is_normal_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                llama_unescape_whitespace(result);
                if (length < (int) result.length()) {
                    return -result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
                if (length < 3) {
                    return -3;
                }
                memcpy(buf, "\xe2\x96\x85", 3);
                return 3;
            } else if (llama_is_control_token(model->vocab, token)) {
                ;
            } else if (llama_is_byte_token(model->vocab, token)) {
                if (length < 1) {
                    return -1;
                }
                buf[0] = llama_token_to_byte(model->vocab, token);
                return 1;
            } else {
                // TODO: for now we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                // GGML_ASSERT(false);
            }
            break;
        }
        case LLAMA_VOCAB_TYPE_BPE: {
            if (llama_is_normal_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                result = llama_decode_text(result);
                if (length < (int) result.length()) {
                    return -result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_control_token(model->vocab, token)) {
                ;
            } else {
                // TODO: for now we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                // GGML_ASSERT(false);
            }
            break;
        }
        default:
            GGML_ASSERT(false);
        }
    }
    return 0;
}
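
// Usage sketch (illustrative only): converting a token to text with the same
// "negative result means required size" convention as llama_tokenize(). No null
// terminator is written, so the string is sized from the return value.
//
//     std::string piece(8, '\0');
//     int n = llama_token_to_piece(model, token, &piece[0], (int) piece.size());
//     if (n < 0) {
//         piece.resize(-n);
//         n = llama_token_to_piece(model, token, &piece[0], (int) piece.size());
//     }
//     piece.resize(n);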

struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}

void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    LLAMA_LOG_INFO("\n");
    LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, timings.t_load_ms);
    LLAMA_LOG_INFO("%s: sample time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    LLAMA_LOG_INFO("%s: total time = %10.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}
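
// Usage sketch (illustrative only): the timing counters accumulate across calls, so
// reset them between independent measurements.
//
//     llama_reset_timings(ctx);
//     // ... run one benchmark pass (llama_decode / sampling) ...
//     llama_print_timings(ctx); // or llama_get_timings(ctx) for programmatic access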

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";

    return s.c_str();
}

void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
    fprintf(stream, "\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "# Timings #\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "\n");

    fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
            1.0e-3 * ctx->t_eval_us / ctx->n_eval);
    fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
            1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
    fprintf(stream, "mst_sample: %.2f # ms / token during sampling\n",
            1.0e-3 * ctx->t_sample_us / ctx->n_sample);
    fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
    fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
    fprintf(stream, "n_sample: %d # number of sampled tokens\n", ctx->n_sample);
    fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
    fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
    fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
    fprintf(stream, "t_sample_us: %" PRId64 " # total microseconds spent sampling\n", ctx->t_sample_us);
    fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
            1.0e6 * ctx->n_eval / ctx->t_eval_us);
    fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
            1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
    fprintf(stream, "ts_sample: %.2f # tokens / second during sampling\n",
            1.0e6 * ctx->n_sample / ctx->t_sample_us);
}

// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
) {
    return ctx->model.tensors_by_name;
}

void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
}
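
// Usage sketch (illustrative only): routing library logs to a user callback. The
// callback signature follows ggml_log_callback (level, text, user_data), as in
// llama_log_callback_default below; passing NULL restores the default stderr logger.
//
//     static void my_log_cb(ggml_log_level level, const char * text, void * user_data) {
//         (void) level; (void) user_data;
//         fputs(text, stdout); // e.g. redirect everything to stdout
//     }
//
//     llama_log_set(my_log_cb, NULL);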

static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        char * buffer2 = new char[len+1];
        vsnprintf(buffer2, len+1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}

static void llama_log_internal(ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}

static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}