llama.cpp

#define LLAMA_API_INTERNAL
//#define LLAMA_GGML_BACKEND_CUDA_TEST // for testing only - enables ggml-cuda through ggml-backend, disables partial offloading
#include "llama.h"

#include "unicode.h"

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

#ifdef GGML_USE_CUBLAS
#  include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#  include "ggml-opencl.h"
#endif

#ifdef GGML_USE_METAL
#  include "ggml-metal.h"
#endif
#ifdef GGML_USE_MPI
#  include "ggml-mpi.h"
#endif
#ifndef QK_K
#  ifdef GGML_QKK_64
#    define QK_K 64
#  else
#    define QK_K 256
#  endif
#endif

#ifdef __has_include
    #if __has_include(<unistd.h>)
        #include <unistd.h>
        #if defined(_POSIX_MAPPED_FILES)
            #include <sys/mman.h>
            #include <fcntl.h>
        #endif
        #if defined(_POSIX_MEMLOCK_RANGE)
            #include <sys/resource.h>
        #endif
    #endif
#endif

#if defined(_WIN32)
    #define WIN32_LEAN_AND_MEAN
    #ifndef NOMINMAX
        #define NOMINMAX
    #endif
    #include <windows.h>
    #include <io.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <forward_list>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <queue>
#include <random>
#include <regex>
#include <set>
#include <sstream>
#include <thread>
#include <type_traits>
#include <unordered_map>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif
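// Editorial note for this excerpt (not part of the upstream source): the two limits
// below appear to bound, respectively, the number of nodes allocated for the ggml
// compute graph and the number of mixture-of-experts experts a model may declare.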
#define LLAMA_MAX_NODES   8192
#define LLAMA_MAX_EXPERTS 8

//
// logging
//

LLAMA_ATTRIBUTE_FORMAT(2, 3)
static void llama_log_internal        (ggml_log_level level, const char * format, ...);
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)

//
// helpers
//

static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
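// Illustrative usage (added for this excerpt, not part of the upstream source):
// utf8_len() maps the first byte of a UTF-8 sequence to its total byte length,
// so a string can be walked one codepoint at a time:
//
//     for (size_t i = 0; i < s.size(); i += utf8_len(s[i])) { /* ... */ }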
static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    std::string result;
    for (size_t pos = 0; ; pos += search.length()) {
        auto new_pos = s.find(search, pos);
        if (new_pos == std::string::npos) {
            result += s.substr(pos, s.size() - pos);
            break;
        }
        result += s.substr(pos, new_pos - pos) + replace;
        pos = new_pos;
    }
    s = std::move(result);
}
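// Illustrative example (added for this excerpt, not part of the upstream source):
// replace_all() substitutes every non-overlapping occurrence of `search` in `s`:
//
//     std::string s = "a_b_c";
//     replace_all(s, "_", ".");  // s == "a.b.c"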
static bool is_float_close(float a, float b, float abs_tol) {
    // Check for non-negative tolerance
    if (abs_tol < 0.0) {
        throw std::invalid_argument("Tolerance must be non-negative");
    }

    // Exact equality check
    if (a == b) {
        return true;
    }

    // Check for infinities
    if (std::isinf(a) || std::isinf(b)) {
        return false;
    }

    // Regular comparison using the provided absolute tolerance
    return std::fabs(b - a) <= abs_tol;
}
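// Illustrative example (added for this excerpt, not part of the upstream source):
//
//     is_float_close(1.0f, 1.0f + 1e-7f, 1e-5f);  // true  (difference within abs_tol)
//     is_float_close(1.0f, INFINITY,     1e-5f);  // false (infinities never match)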
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}
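// Editorial note for this excerpt (not part of the upstream source): zeros() writes
// n zero bytes to `file`; it appears to be used later in this file to emit alignment
// padding when writing out quantized model data.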
LLAMA_ATTRIBUTE_FORMAT(1, 2)
static std::string format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}
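// Illustrative usage (added for this excerpt, not part of the upstream source):
// format() is a printf-style helper that returns a std::string; the variable names
// below are made up for the example:
//
//     std::string msg = format("loaded %d tensors in %.2f s", n_tensors, t_load);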
//
// gguf constants (sync with gguf.py)
//

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_PERSIMMON,
    LLM_ARCH_REFACT,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_PHI2,
    LLM_ARCH_PLAMO,
    LLM_ARCH_UNKNOWN,
};

static std::map<llm_arch, std::string> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA, "llama" },
    { LLM_ARCH_FALCON, "falcon" },
    { LLM_ARCH_GPT2, "gpt2" },
    { LLM_ARCH_GPTJ, "gptj" },
    { LLM_ARCH_GPTNEOX, "gptneox" },
    { LLM_ARCH_MPT, "mpt" },
    { LLM_ARCH_BAICHUAN, "baichuan" },
    { LLM_ARCH_STARCODER, "starcoder" },
    { LLM_ARCH_PERSIMMON, "persimmon" },
    { LLM_ARCH_REFACT, "refact" },
    { LLM_ARCH_BLOOM, "bloom" },
    { LLM_ARCH_STABLELM, "stablelm" },
    { LLM_ARCH_QWEN, "qwen" },
    { LLM_ARCH_PHI2, "phi2" },
    { LLM_ARCH_PLAMO, "plamo" },
};

enum llm_kv {
    LLM_KV_GENERAL_ARCHITECTURE,
    LLM_KV_GENERAL_QUANTIZATION_VERSION,
    LLM_KV_GENERAL_ALIGNMENT,
    LLM_KV_GENERAL_NAME,
    LLM_KV_GENERAL_AUTHOR,
    LLM_KV_GENERAL_URL,
    LLM_KV_GENERAL_DESCRIPTION,
    LLM_KV_GENERAL_LICENSE,
    LLM_KV_GENERAL_SOURCE_URL,
    LLM_KV_GENERAL_SOURCE_HF_REPO,

    LLM_KV_CONTEXT_LENGTH,
    LLM_KV_EMBEDDING_LENGTH,
    LLM_KV_BLOCK_COUNT,
    LLM_KV_FEED_FORWARD_LENGTH,
    LLM_KV_USE_PARALLEL_RESIDUAL,
    LLM_KV_TENSOR_DATA_LAYOUT,
    LLM_KV_EXPERT_COUNT,
    LLM_KV_EXPERT_USED_COUNT,

    LLM_KV_ATTENTION_HEAD_COUNT,
    LLM_KV_ATTENTION_HEAD_COUNT_KV,
    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
    LLM_KV_ATTENTION_CLAMP_KQV,
    LLM_KV_ATTENTION_LAYERNORM_EPS,
    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,

    LLM_KV_ROPE_DIMENSION_COUNT,
    LLM_KV_ROPE_FREQ_BASE,
    LLM_KV_ROPE_SCALE_LINEAR,
    LLM_KV_ROPE_SCALING_TYPE,
    LLM_KV_ROPE_SCALING_FACTOR,
    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
    LLM_KV_ROPE_SCALING_FINETUNED,

    LLM_KV_TOKENIZER_MODEL,
    LLM_KV_TOKENIZER_LIST,
    LLM_KV_TOKENIZER_TOKEN_TYPE,
    LLM_KV_TOKENIZER_SCORES,
    LLM_KV_TOKENIZER_MERGES,
    LLM_KV_TOKENIZER_BOS_ID,
    LLM_KV_TOKENIZER_EOS_ID,
    LLM_KV_TOKENIZER_UNK_ID,
    LLM_KV_TOKENIZER_SEP_ID,
    LLM_KV_TOKENIZER_PAD_ID,
    LLM_KV_TOKENIZER_ADD_BOS,
    LLM_KV_TOKENIZER_ADD_EOS,
    LLM_KV_TOKENIZER_HF_JSON,
    LLM_KV_TOKENIZER_RWKV,
};

static std::map<llm_kv, std::string> LLM_KV_NAMES = {
    { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
    { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
    { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
    { LLM_KV_GENERAL_NAME, "general.name" },
    { LLM_KV_GENERAL_AUTHOR, "general.author" },
    { LLM_KV_GENERAL_URL, "general.url" },
    { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
    { LLM_KV_GENERAL_LICENSE, "general.license" },
    { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
    { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },

    { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
    { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
    { LLM_KV_BLOCK_COUNT, "%s.block_count" },
    { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
    { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
    { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
    { LLM_KV_EXPERT_COUNT, "%s.expert_count" },
    { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" },

    { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
    { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
    { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
    { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },

    { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
    { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
    { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
    { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
    { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
    { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },

    { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
    { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
    { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
    { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
    { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
    { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
    { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
    { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
    { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
    { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
    { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
    { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
    { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
    { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
};

struct LLM_KV {
    LLM_KV(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    std::string operator()(llm_kv kv) const {
        return ::format(LLM_KV_NAMES[kv].c_str(), LLM_ARCH_NAMES[arch].c_str());
    }
};
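// Illustrative example (added for this excerpt, not part of the upstream source):
// LLM_KV substitutes the architecture name into the "%s" placeholders of LLM_KV_NAMES:
//
//     const LLM_KV kv(LLM_ARCH_LLAMA);
//     kv(LLM_KV_CONTEXT_LENGTH);  // -> "llama.context_length"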
enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_TOKEN_EMBD_NORM,
    LLM_TENSOR_POS_EMBD,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_ROPE_FREQS,
    LLM_TENSOR_ATTN_Q,
    LLM_TENSOR_ATTN_K,
    LLM_TENSOR_ATTN_V,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_NORM_2,
    LLM_TENSOR_ATTN_ROT_EMBD,
    LLM_TENSOR_FFN_GATE_INP,
    LLM_TENSOR_FFN_NORM,
    LLM_TENSOR_FFN_GATE,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
    LLM_TENSOR_FFN_ACT,
    LLM_TENSOR_FFN_DOWN_EXP,
    LLM_TENSOR_FFN_GATE_EXP,
    LLM_TENSOR_FFN_UP_EXP,
    LLM_TENSOR_ATTN_Q_NORM,
    LLM_TENSOR_ATTN_K_NORM,
};
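// Editorial note for this excerpt (not part of the upstream source): the table below
// maps each architecture to printf-style name templates for its tensors. The "%d"
// placeholder is filled with the block (layer) index, and the *_EXP expert variants
// take a second "%d" for the expert index; for example, LLM_TENSOR_ATTN_Q for LLAMA
// layer 3 expands to "blk.3.attn_q".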
static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
    {
        LLM_ARCH_LLAMA,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
        },
    },
    {
        LLM_ARCH_BAICHUAN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_FALCON,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_GPT2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_GPTJ,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
    {
        LLM_ARCH_GPTNEOX,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PERSIMMON,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
        },
    },
    {
        LLM_ARCH_MPT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
        },
    },
    {
        LLM_ARCH_STARCODER,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_REFACT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  471. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  472. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  473. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  474. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  475. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  476. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  477. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  478. },
  479. },
  480. {
  481. LLM_ARCH_BLOOM,
  482. {
  483. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  484. { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
  485. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  486. { LLM_TENSOR_OUTPUT, "output" },
  487. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  488. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  489. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  490. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  491. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  492. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  493. },
  494. },
  495. {
  496. LLM_ARCH_STABLELM,
  497. {
  498. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  499. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  500. { LLM_TENSOR_OUTPUT, "output" },
  501. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  502. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  503. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  504. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  505. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  506. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  507. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  508. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  509. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  510. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  511. },
  512. },
  513. {
  514. LLM_ARCH_QWEN,
  515. {
  516. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  517. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  518. { LLM_TENSOR_OUTPUT, "output" },
  519. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  520. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  521. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  522. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  523. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  524. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  525. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  526. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  527. },
  528. },
  529. {
  530. LLM_ARCH_PHI2,
  531. {
  532. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  533. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  534. { LLM_TENSOR_OUTPUT, "output" },
  535. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  536. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  537. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  538. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  539. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  540. },
  541. },
  542. {
  543. LLM_ARCH_PLAMO,
  544. {
  545. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  546. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  547. { LLM_TENSOR_OUTPUT, "output" },
  548. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  549. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  550. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  551. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  552. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  553. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  554. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  555. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  556. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  557. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  558. },
  559. },
  560. {
  561. LLM_ARCH_UNKNOWN,
  562. {
  563. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  564. },
  565. },
  566. };
  567. static llm_arch llm_arch_from_string(const std::string & name) {
  568. for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
  569. if (kv.second == name) {
  570. return kv.first;
  571. }
  572. }
  573. return LLM_ARCH_UNKNOWN;
  574. }
  575. // helper to handle gguf constants
  576. // usage:
  577. //
  578. // const auto tn = LLM_TN(LLM_ARCH_LLAMA);
  579. //
  580. // std::string name = tn(LLM_TENSOR_OUTPUT); -> "output"
  581. // std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias"
  582. // std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
  583. //
  584. struct LLM_TN {
  585. LLM_TN(llm_arch arch) : arch(arch) {}
  586. llm_arch arch;
  587. std::string operator()(llm_tensor tensor) const {
  588. return LLM_TENSOR_NAMES[arch].at(tensor);
  589. }
  590. std::string operator()(llm_tensor tensor, const std::string & suffix) const {
  591. return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
  592. }
  593. std::string operator()(llm_tensor tensor, int bid) const {
  594. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
  595. }
  596. std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
  597. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
  598. }
  599. std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
  600. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid, xid) + "." + suffix;
  601. }
  602. };
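// Additional usage sketch for the per-expert (MoE) tensors, where bid is the block
// index and xid the expert index:
//
//   const auto tn = LLM_TN(LLM_ARCH_LLAMA);
//   std::string name = tn(LLM_TENSOR_FFN_GATE_EXP, "weight", 3, 1); // -> "blk.3.ffn_gate.1.weight"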
  603. //
  604. // gguf helpers
  605. //
  606. static std::map<int8_t, std::string> LLAMA_ROPE_SCALING_TYPES = {
  607. { LLAMA_ROPE_SCALING_NONE, "none" },
  608. { LLAMA_ROPE_SCALING_LINEAR, "linear" },
  609. { LLAMA_ROPE_SCALING_YARN, "yarn" },
  610. };
  611. static int8_t llama_rope_scaling_type_from_string(const std::string & name) {
  612. for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
  613. if (kv.second == name) {
  614. return kv.first;
  615. }
  616. }
  617. return LLAMA_ROPE_SCALING_UNSPECIFIED;
  618. }
  619. static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
  620. switch (type) {
  621. case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
  622. case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
  623. case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
  624. case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
  625. case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
  626. case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
  627. case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
  628. case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
  629. case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
  630. case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
  631. case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
  632. default: return format("unknown type %d", type);
  633. }
  634. }
  635. static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
  636. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  637. switch (type) {
  638. case GGUF_TYPE_STRING:
  639. return gguf_get_val_str(ctx_gguf, i);
  640. case GGUF_TYPE_ARRAY:
  641. {
  642. const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
  643. int arr_n = gguf_get_arr_n(ctx_gguf, i);
  644. const void * data = gguf_get_arr_data(ctx_gguf, i);
  645. std::stringstream ss;
  646. ss << "[";
  647. for (int j = 0; j < arr_n; j++) {
  648. if (arr_type == GGUF_TYPE_STRING) {
  649. std::string val = gguf_get_arr_str(ctx_gguf, i, j);
  650. // escape quotes
  651. replace_all(val, "\\", "\\\\");
  652. replace_all(val, "\"", "\\\"");
  653. ss << '"' << val << '"';
  654. } else if (arr_type == GGUF_TYPE_ARRAY) {
  655. ss << "???";
  656. } else {
  657. ss << gguf_data_to_str(arr_type, data, j);
  658. }
  659. if (j < arr_n - 1) {
  660. ss << ", ";
  661. }
  662. }
  663. ss << "]";
  664. return ss.str();
  665. }
  666. default:
  667. return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
  668. }
  669. }
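// Illustrative sketch only: the two helpers above can be combined to dump all
// key/value metadata of an already-opened gguf_context, which is essentially what
// the model loader below does when it prints the metadata table.
static void gguf_dump_kv_sketch(const struct gguf_context * ctx_gguf) {
    const int n_kv = gguf_get_n_kv(ctx_gguf);
    for (int i = 0; i < n_kv; i++) {
        const char *      key = gguf_get_key(ctx_gguf, i);
        const std::string val = gguf_kv_to_str(ctx_gguf, i);
        LLAMA_LOG_INFO("%s = %s\n", key, val.c_str());
    }
}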
  670. //
  671. // ggml helpers
  672. //
  673. static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
  674. struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
  675. if (plan.work_size > 0) {
  676. buf.resize(plan.work_size);
  677. plan.work_data = buf.data();
  678. }
  679. ggml_graph_compute(graph, &plan);
  680. }
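// Usage sketch, assuming "gf" is an already-built ggml_cgraph *: the helper sizes
// the scratch buffer from the compute plan and then runs the graph.
//
//   std::vector<uint8_t> work_buf;
//   ggml_graph_compute_helper(work_buf, gf, /*n_threads=*/4);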
  681. //
  682. // llama helpers
  683. //
  684. #if defined(_WIN32)
  685. static std::string llama_format_win_err(DWORD err) {
  686. LPSTR buf;
  687. size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
  688. NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
  689. if (!size) {
  690. return "FormatMessageA failed";
  691. }
  692. std::string ret(buf, size);
  693. LocalFree(buf);
  694. return ret;
  695. }
  696. #endif
  697. template <typename T>
  698. struct no_init {
  699. T value;
  700. no_init() { /* do nothing */ }
  701. };
  702. struct llama_file {
  703. // use FILE * so we don't have to re-open the file to mmap
  704. FILE * fp;
  705. size_t size;
  706. llama_file(const char * fname, const char * mode) {
  707. fp = std::fopen(fname, mode);
  708. if (fp == NULL) {
  709. throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
  710. }
  711. seek(0, SEEK_END);
  712. size = tell();
  713. seek(0, SEEK_SET);
  714. }
  715. size_t tell() const {
  716. #ifdef _WIN32
  717. __int64 ret = _ftelli64(fp);
  718. #else
  719. long ret = std::ftell(fp);
  720. #endif
  721. GGML_ASSERT(ret != -1); // this really shouldn't fail
  722. return (size_t) ret;
  723. }
  724. void seek(size_t offset, int whence) const {
  725. #ifdef _WIN32
  726. int ret = _fseeki64(fp, (__int64) offset, whence);
  727. #else
  728. int ret = std::fseek(fp, (long) offset, whence);
  729. #endif
  730. GGML_ASSERT(ret == 0); // same
  731. }
  732. void read_raw(void * ptr, size_t len) const {
  733. if (len == 0) {
  734. return;
  735. }
  736. errno = 0;
  737. std::size_t ret = std::fread(ptr, len, 1, fp);
  738. if (ferror(fp)) {
  739. throw std::runtime_error(format("read error: %s", strerror(errno)));
  740. }
  741. if (ret != 1) {
  742. throw std::runtime_error("unexpectedly reached end of file");
  743. }
  744. }
  745. uint32_t read_u32() const {
  746. uint32_t ret;
  747. read_raw(&ret, sizeof(ret));
  748. return ret;
  749. }
  750. void write_raw(const void * ptr, size_t len) const {
  751. if (len == 0) {
  752. return;
  753. }
  754. errno = 0;
  755. size_t ret = std::fwrite(ptr, len, 1, fp);
  756. if (ret != 1) {
  757. throw std::runtime_error(format("write error: %s", strerror(errno)));
  758. }
  759. }
  760. void write_u32(std::uint32_t val) const {
  761. write_raw(&val, sizeof(val));
  762. }
  763. ~llama_file() {
  764. if (fp) {
  765. std::fclose(fp);
  766. }
  767. }
  768. };
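// Usage sketch, assuming "fname" points to a GGUF model file (GGUF files begin
// with a 4-byte magic followed by little-endian fields):
//
//   llama_file f(fname, "rb");
//   const uint32_t magic = f.read_u32(); // first 4 bytes of the file
//   // f.size holds the total file size; use f.seek()/f.read_raw() for the rest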
  769. struct llama_mmap {
  770. void * addr;
  771. size_t size;
  772. llama_mmap(const llama_mmap &) = delete;
  773. #ifdef _POSIX_MAPPED_FILES
  774. static constexpr bool SUPPORTED = true;
  775. // list of mapped fragments (first_offset, last_offset)
  776. std::vector<std::pair<size_t, size_t>> mapped_fragments;
  777. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
  778. size = file->size;
  779. int fd = fileno(file->fp);
  780. int flags = MAP_SHARED;
  781. // prefetch/readahead impairs performance on NUMA systems
  782. if (numa) { prefetch = 0; }
  783. #ifdef __linux__
  784. // advise the kernel to read the file sequentially (increases readahead)
  785. if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
  786. LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
  787. strerror(errno));
  788. }
  789. if (prefetch) { flags |= MAP_POPULATE; }
  790. #endif
  791. addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
  792. if (addr == MAP_FAILED) { // NOLINT
  793. throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
  794. }
  795. if (prefetch > 0) {
  796. // advise the kernel to preload the mapped memory
  797. if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
  798. LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
  799. strerror(errno));
  800. }
  801. }
  802. if (numa) {
  803. // advise the kernel not to use readahead
  804. // (because the next page might not belong on the same node)
  805. if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
  806. LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
  807. strerror(errno));
  808. }
  809. }
  810. // initialize list of mapped_fragments
  811. mapped_fragments.emplace_back(0, file->size);
  812. }
  813. static void align_range(size_t * first, size_t * last, size_t page_size) {
  814. // align first to the next page
  815. size_t offset_in_page = *first & (page_size - 1);
  816. size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
  817. *first += offset_to_page;
  818. // align last to the previous page
  819. *last = *last & ~(page_size - 1);
  820. if (*last <= *first) {
  821. *last = *first;
  822. }
  823. }
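// Worked example for align_range with page_size = 4096:
//   first = 1000, last = 9000  ->  first = 4096, last = 8192
// i.e. the range is shrunk inward so that only whole pages are unmapped; if the
// shrunken range collapses (last <= first), the caller ends up unmapping nothing.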
  824. // partially unmap the file in the range [first, last)
  825. void unmap_fragment(size_t first, size_t last) {
  826. // note: this function must not be called multiple times with overlapping ranges
  827. // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
  828. int page_size = sysconf(_SC_PAGESIZE);
  829. align_range(&first, &last, page_size);
  830. size_t len = last - first;
  831. if (len == 0) {
  832. return;
  833. }
  834. GGML_ASSERT(first % page_size == 0);
  835. GGML_ASSERT(last % page_size == 0);
  836. GGML_ASSERT(last > first);
  837. void * next_page_start = (uint8_t *) addr + first;
  838. // unmap the range
  839. if (munmap(next_page_start, len)) {
  840. LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
  841. }
  842. // update the list of mapped fragments to avoid unmapping the same range again in the destructor
  843. std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
  844. for (const auto & frag : mapped_fragments) {
  845. if (frag.first < first && frag.second > last) {
  846. // the range is in the middle of the fragment, split it
  847. new_mapped_fragments.emplace_back(frag.first, first);
  848. new_mapped_fragments.emplace_back(last, frag.second);
  849. } else if (frag.first < first && frag.second > first) {
  850. // the range starts in the middle of the fragment
  851. new_mapped_fragments.emplace_back(frag.first, first);
  852. } else if (frag.first < last && frag.second > last) {
  853. // the range ends in the middle of the fragment
  854. new_mapped_fragments.emplace_back(last, frag.second);
  855. } else if (frag.first >= first && frag.second <= last) {
  856. // the range covers the entire fragment
  857. } else {
  858. // the range is outside the fragment
  859. new_mapped_fragments.push_back(frag);
  860. }
  861. }
  862. mapped_fragments = std::move(new_mapped_fragments);
  863. }
  864. ~llama_mmap() {
  865. for (const auto & frag : mapped_fragments) {
  866. if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
  867. LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
  868. }
  869. }
  870. }
  871. #elif defined(_WIN32)
  872. static constexpr bool SUPPORTED = true;
  873. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
  874. GGML_UNUSED(numa);
  875. size = file->size;
  876. HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
  877. HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
  878. if (hMapping == NULL) {
  879. DWORD error = GetLastError();
  880. throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
  881. }
  882. addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
  883. DWORD error = GetLastError();
  884. CloseHandle(hMapping);
  885. if (addr == NULL) {
  886. throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
  887. }
  888. if (prefetch > 0) {
  889. // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
  890. BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
  891. HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
  892. // may fail on pre-Windows 8 systems
  893. pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
  894. if (pPrefetchVirtualMemory) {
  895. // advise the kernel to preload the mapped memory
  896. WIN32_MEMORY_RANGE_ENTRY range;
  897. range.VirtualAddress = addr;
  898. range.NumberOfBytes = (SIZE_T) std::min(size, prefetch);
  899. if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
  900. LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
  901. llama_format_win_err(GetLastError()).c_str());
  902. }
  903. }
  904. }
  905. }
  906. void unmap_fragment(size_t first, size_t last) {
  907. // not supported
  908. GGML_UNUSED(first);
  909. GGML_UNUSED(last);
  910. }
  911. ~llama_mmap() {
  912. if (!UnmapViewOfFile(addr)) {
  913. LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
  914. llama_format_win_err(GetLastError()).c_str());
  915. }
  916. }
  917. #else
  918. static constexpr bool SUPPORTED = false;
919. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
  920. GGML_UNUSED(file);
  921. GGML_UNUSED(prefetch);
  922. GGML_UNUSED(numa);
  923. throw std::runtime_error("mmap not supported");
  924. }
  925. void unmap_fragment(size_t first, size_t last) {
  926. GGML_UNUSED(first);
  927. GGML_UNUSED(last);
  928. throw std::runtime_error("mmap not supported");
  929. }
  930. #endif
  931. };
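// Usage sketch, assuming a hypothetical path "model.gguf" and a POSIX or Win32
// build where llama_mmap::SUPPORTED is true:
//
//   llama_file file("model.gguf", "rb");
//   llama_mmap mapping(&file, /*prefetch=*/(size_t) -1, /*numa=*/false);
//   const uint8_t * base = (const uint8_t *) mapping.addr; // mapping.size == file.size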
  932. // Represents some region of memory being locked using mlock or VirtualLock;
  933. // will automatically unlock on destruction.
  934. struct llama_mlock {
  935. void * addr = NULL;
  936. size_t size = 0;
  937. bool failed_already = false;
  938. llama_mlock() {}
  939. llama_mlock(const llama_mlock &) = delete;
  940. ~llama_mlock() {
  941. if (size) {
  942. raw_unlock(addr, size);
  943. }
  944. }
  945. void init(void * ptr) {
  946. GGML_ASSERT(addr == NULL && size == 0); // NOLINT
  947. addr = ptr;
  948. }
  949. void grow_to(size_t target_size) {
  950. GGML_ASSERT(addr);
  951. if (failed_already) {
  952. return;
  953. }
  954. size_t granularity = lock_granularity();
  955. target_size = (target_size + granularity - 1) & ~(granularity - 1);
  956. if (target_size > size) {
  957. if (raw_lock((uint8_t *) addr + size, target_size - size)) {
  958. size = target_size;
  959. } else {
  960. failed_already = true;
  961. }
  962. }
  963. }
  964. #ifdef _POSIX_MEMLOCK_RANGE
  965. static constexpr bool SUPPORTED = true;
  966. static size_t lock_granularity() {
  967. return (size_t) sysconf(_SC_PAGESIZE);
  968. }
  969. #ifdef __APPLE__
  970. #define MLOCK_SUGGESTION \
  971. "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
  972. "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
  973. #else
  974. #define MLOCK_SUGGESTION \
  975. "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
  976. #endif
  977. bool raw_lock(const void * addr, size_t size) const {
  978. if (!mlock(addr, size)) {
  979. return true;
  980. }
981. char * errmsg = std::strerror(errno);
  982. bool suggest = (errno == ENOMEM);
  983. // Check if the resource limit is fine after all
  984. struct rlimit lock_limit;
  985. if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
  986. suggest = false;
  987. }
  988. if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
  989. suggest = false;
  990. }
  991. fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
  992. size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
  993. return false;
  994. }
  995. #undef MLOCK_SUGGESTION
  996. static void raw_unlock(void * addr, size_t size) {
  997. if (munlock(addr, size)) {
  998. fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
  999. }
  1000. }
  1001. #elif defined(_WIN32)
  1002. static constexpr bool SUPPORTED = true;
  1003. static size_t lock_granularity() {
  1004. SYSTEM_INFO si;
  1005. GetSystemInfo(&si);
  1006. return (size_t) si.dwPageSize;
  1007. }
  1008. bool raw_lock(void * ptr, size_t len) const {
  1009. for (int tries = 1; ; tries++) {
  1010. if (VirtualLock(ptr, len)) {
  1011. return true;
  1012. }
  1013. if (tries == 2) {
  1014. fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
  1015. len, size, llama_format_win_err(GetLastError()).c_str());
  1016. return false;
  1017. }
  1018. // It failed but this was only the first try; increase the working
  1019. // set size and try again.
  1020. SIZE_T min_ws_size, max_ws_size;
  1021. if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
  1022. fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
  1023. llama_format_win_err(GetLastError()).c_str());
  1024. return false;
  1025. }
  1026. // Per MSDN: "The maximum number of pages that a process can lock
  1027. // is equal to the number of pages in its minimum working set minus
  1028. // a small overhead."
  1029. // Hopefully a megabyte is enough overhead:
  1030. size_t increment = len + 1048576;
  1031. // The minimum must be <= the maximum, so we need to increase both:
  1032. min_ws_size += increment;
  1033. max_ws_size += increment;
  1034. if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
  1035. fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
  1036. llama_format_win_err(GetLastError()).c_str());
  1037. return false;
  1038. }
  1039. }
  1040. }
  1041. static void raw_unlock(void * ptr, size_t len) {
  1042. if (!VirtualUnlock(ptr, len)) {
  1043. fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
  1044. llama_format_win_err(GetLastError()).c_str());
  1045. }
  1046. }
  1047. #else
  1048. static constexpr bool SUPPORTED = false;
  1049. static size_t lock_granularity() {
  1050. return (size_t) 65536;
  1051. }
  1052. bool raw_lock(const void * addr, size_t len) const {
  1053. fprintf(stderr, "warning: mlock not supported on this system\n");
  1054. return false;
  1055. }
  1056. static void raw_unlock(const void * addr, size_t len) {}
  1057. #endif
  1058. };
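// Usage sketch: lock a buffer that may still grow (this is the pattern used for the
// model and mmap buffers when memory locking is enabled). "buf_base" and
// "buf_n_bytes" are placeholder names:
//
//   llama_mlock lock;
//   lock.init(buf_base);        // remember the start address, lock nothing yet
//   lock.grow_to(buf_n_bytes);  // lock [base, base + n), rounded up to the lock granularity
//   // later grow_to() calls only lock the newly added tail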
  1059. typedef void (*offload_func_t)(struct ggml_tensor * tensor);
  1060. static void ggml_offload_nop(struct ggml_tensor * tensor) {
  1061. (void) tensor;
  1062. }
  1063. static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
  1064. std::vector<char> result(8, 0);
  1065. const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
  1066. if (n_tokens < 0) {
  1067. result.resize(-n_tokens);
  1068. int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
  1069. GGML_ASSERT(check == -n_tokens);
  1070. }
  1071. else {
  1072. result.resize(n_tokens);
  1073. }
  1074. return std::string(result.data(), result.size());
  1075. }
  1076. static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) {
  1077. ggml_backend_buffer_type_t buft = nullptr;
  1078. #ifdef GGML_USE_METAL
  1079. if (n_gpu_layers > 0) {
  1080. buft = ggml_backend_metal_buffer_type();
  1081. }
  1082. #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  1083. if (n_gpu_layers > 0) {
  1084. buft = ggml_backend_cuda_buffer_type(0);
  1085. }
  1086. #elif defined(GGML_USE_CUBLAS)
  1087. buft = ggml_backend_cuda_host_buffer_type();
  1088. #elif defined(GGML_USE_CPU_HBM)
  1089. buft = ggml_backend_cpu_hbm_buffer_type();
  1090. #endif
  1091. if (buft == nullptr) {
  1092. buft = ggml_backend_cpu_buffer_type();
  1093. }
  1094. return buft;
  1095. GGML_UNUSED(n_gpu_layers);
  1096. }
  1097. //
  1098. // globals
  1099. //
  1100. struct llama_state {
  1101. llama_state() {
  1102. #ifdef GGML_USE_METAL
  1103. ggml_metal_log_set_callback(log_callback, log_callback_user_data);
  1104. #endif
  1105. }
  1106. // We save the log callback globally
  1107. ggml_log_callback log_callback = llama_log_callback_default;
  1108. void * log_callback_user_data = nullptr;
  1109. };
  1110. static llama_state g_state;
  1111. // available llama models
  1112. enum e_model {
  1113. MODEL_UNKNOWN,
  1114. MODEL_1B,
  1115. MODEL_3B,
  1116. MODEL_7B,
  1117. MODEL_8B,
  1118. MODEL_13B,
  1119. MODEL_15B,
  1120. MODEL_30B,
  1121. MODEL_34B,
  1122. MODEL_40B,
  1123. MODEL_65B,
  1124. MODEL_70B,
  1125. MODEL_SMALL,
  1126. MODEL_MEDIUM,
  1127. MODEL_LARGE,
  1128. MODEL_XL,
  1129. };
  1130. static const size_t kiB = 1024;
  1131. static const size_t MiB = 1024*kiB;
  1132. static const size_t GiB = 1024*MiB;
  1133. struct llama_hparams {
  1134. bool vocab_only;
  1135. uint32_t n_vocab;
  1136. uint32_t n_ctx_train; // context size the model was trained on
  1137. uint32_t n_embd;
  1138. uint32_t n_head;
  1139. uint32_t n_head_kv;
  1140. uint32_t n_layer;
  1141. uint32_t n_rot;
  1142. uint32_t n_ff;
  1143. uint32_t n_expert = 0;
  1144. uint32_t n_expert_used = 0;
  1145. float f_norm_eps;
  1146. float f_norm_rms_eps;
  1147. float rope_freq_base_train;
  1148. float rope_freq_scale_train;
  1149. uint32_t n_yarn_orig_ctx;
  1150. int8_t rope_scaling_type_train : 3;
  1151. bool rope_finetuned : 1;
  1152. float f_clamp_kqv;
  1153. float f_max_alibi_bias;
  1154. bool operator!=(const llama_hparams & other) const {
  1155. if (this->vocab_only != other.vocab_only) return true;
  1156. if (this->n_vocab != other.n_vocab) return true;
  1157. if (this->n_ctx_train != other.n_ctx_train) return true;
  1158. if (this->n_embd != other.n_embd) return true;
  1159. if (this->n_head != other.n_head) return true;
  1160. if (this->n_head_kv != other.n_head_kv) return true;
  1161. if (this->n_layer != other.n_layer) return true;
  1162. if (this->n_rot != other.n_rot) return true;
  1163. if (this->n_ff != other.n_ff) return true;
  1164. if (this->n_expert != other.n_expert) return true;
  1165. if (this->n_expert_used != other.n_expert_used) return true;
  1166. if (this->rope_finetuned != other.rope_finetuned) return true;
  1167. if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;
  1168. const float EPSILON = 1e-9f;
  1169. if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true;
  1170. if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true;
  1171. if (!is_float_close(this->rope_freq_base_train, other.rope_freq_base_train, EPSILON)) return true;
  1172. if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;
  1173. return false;
  1174. }
  1175. uint32_t n_gqa() const {
  1176. return n_head/n_head_kv;
  1177. }
  1178. uint32_t n_embd_head() const {
  1179. return n_embd/n_head;
  1180. }
  1181. uint32_t n_embd_gqa() const {
  1182. return n_embd/n_gqa();
  1183. }
  1184. };
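// Worked example for the derived sizes above, assuming a LLaMA-7B-like config
// (n_embd = 4096, n_head = 32, n_head_kv = 32):
//   n_gqa()       = 32/32   = 1     (no grouped-query attention)
//   n_embd_head() = 4096/32 = 128   (per-head dimension)
//   n_embd_gqa()  = 4096/1  = 4096  (K/V row size; shrinks when n_head_kv < n_head)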
  1185. struct llama_cparams {
  1186. uint32_t n_ctx; // context size used during inference
  1187. uint32_t n_batch;
  1188. uint32_t n_threads; // number of threads to use for generation
  1189. uint32_t n_threads_batch; // number of threads to use for batch processing
  1190. float rope_freq_base;
  1191. float rope_freq_scale;
  1192. uint32_t n_yarn_orig_ctx;
  1193. // These hyperparameters are not exposed in GGUF, because all
  1194. // existing YaRN models use the same values for them.
  1195. float yarn_ext_factor;
  1196. float yarn_attn_factor;
  1197. float yarn_beta_fast;
  1198. float yarn_beta_slow;
  1199. bool mul_mat_q;
  1200. bool offload_kqv;
  1201. };
  1202. struct llama_layer {
  1203. // normalization
  1204. struct ggml_tensor * attn_norm;
  1205. struct ggml_tensor * attn_norm_b;
  1206. struct ggml_tensor * attn_norm_2;
  1207. struct ggml_tensor * attn_norm_2_b;
  1208. struct ggml_tensor * attn_q_norm;
  1209. struct ggml_tensor * attn_q_norm_b;
  1210. struct ggml_tensor * attn_k_norm;
  1211. struct ggml_tensor * attn_k_norm_b;
  1212. // attention
  1213. struct ggml_tensor * wq;
  1214. struct ggml_tensor * wk;
  1215. struct ggml_tensor * wv;
  1216. struct ggml_tensor * wo;
  1217. struct ggml_tensor * wqkv;
  1218. // attention bias
  1219. struct ggml_tensor * bq;
  1220. struct ggml_tensor * bk;
  1221. struct ggml_tensor * bv;
  1222. struct ggml_tensor * bo;
  1223. struct ggml_tensor * bqkv;
  1224. // normalization
  1225. struct ggml_tensor * ffn_norm;
  1226. struct ggml_tensor * ffn_norm_b;
  1227. // ff
  1228. struct ggml_tensor * ffn_gate; // w1
  1229. struct ggml_tensor * ffn_down; // w2
  1230. struct ggml_tensor * ffn_up; // w3
  1231. // ff MoE
  1232. struct ggml_tensor * ffn_gate_inp;
  1233. struct ggml_tensor * ffn_gate_exp[LLAMA_MAX_EXPERTS];
  1234. struct ggml_tensor * ffn_down_exp[LLAMA_MAX_EXPERTS];
  1235. struct ggml_tensor * ffn_up_exp [LLAMA_MAX_EXPERTS];
  1236. // ff bias
  1237. struct ggml_tensor * ffn_down_b; // b2
  1238. struct ggml_tensor * ffn_up_b; // b3
  1239. struct ggml_tensor * ffn_act;
  1240. };
  1241. struct llama_kv_cell {
  1242. llama_pos pos = -1;
  1243. llama_pos delta = 0;
  1244. std::set<llama_seq_id> seq_id;
  1245. bool has_seq_id(const llama_seq_id & id) const {
  1246. return seq_id.find(id) != seq_id.end();
  1247. }
  1248. };
  1249. // ring-buffer of cached KV data
  1250. struct llama_kv_cache {
  1251. bool has_shift = false;
  1252. // Note: The value of head isn't only used to optimize searching
  1253. // for a free KV slot. llama_decode_internal also uses it, so it
  1254. // cannot be freely changed after a slot has been allocated.
  1255. uint32_t head = 0;
  1256. uint32_t size = 0;
  1257. uint32_t used = 0; // used cells (i.e. at least one seq_id)
  1258. // computed before each graph build
  1259. uint32_t n = 0;
  1260. std::vector<llama_kv_cell> cells;
  1261. std::vector<struct ggml_tensor *> k_l; // per layer
  1262. std::vector<struct ggml_tensor *> v_l;
  1263. struct ggml_context * ctx = NULL;
  1264. ggml_backend_buffer_t buf = NULL;
  1265. ~llama_kv_cache() {
  1266. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  1267. if (ggml_cublas_loaded()) {
  1268. for (size_t i = 0; i < k_l.size(); ++i) {
  1269. ggml_cuda_free_data(k_l[i]);
  1270. ggml_cuda_free_data(v_l[i]);
  1271. }
  1272. }
  1273. #endif
  1274. if (ctx) {
  1275. ggml_free(ctx);
  1276. }
  1277. ggml_backend_buffer_free(buf);
  1278. }
  1279. };
  1280. struct llama_vocab {
  1281. using id = int32_t;
  1282. using token = std::string;
  1283. using ttype = llama_token_type;
  1284. struct token_data {
  1285. token text;
  1286. float score;
  1287. ttype type;
  1288. };
  1289. enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;
  1290. std::unordered_map<token, id> token_to_id;
  1291. std::vector<token_data> id_to_token;
  1292. std::unordered_map<token, id> special_tokens_cache;
  1293. std::map<std::pair<std::string, std::string>, int> bpe_ranks;
  1294. // default LLaMA special tokens
  1295. id special_bos_id = 1;
  1296. id special_eos_id = 2;
  1297. id special_unk_id = 0;
  1298. id special_sep_id = -1;
  1299. id special_pad_id = -1;
  1300. int special_add_bos = -1; // -1 unknown, 1 add, 0 don't add.
  1301. int special_add_eos = -1; // -1 unknown, 1 add, 0 don't add.
  1302. id linefeed_id = 13;
  1303. id special_prefix_id = 32007;
  1304. id special_middle_id = 32009;
  1305. id special_suffix_id = 32008;
  1306. id special_eot_id = 32010;
  1307. int find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
  1308. GGML_ASSERT(token_left.find(' ') == std::string::npos);
  1309. GGML_ASSERT(token_left.find('\n') == std::string::npos);
  1310. GGML_ASSERT(token_right.find(' ') == std::string::npos);
  1311. GGML_ASSERT(token_right.find('\n') == std::string::npos);
  1312. auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
  1313. if (it == bpe_ranks.end()) {
  1314. return -1;
  1315. }
  1316. return it->second;
  1317. }
  1318. };
  1319. struct llama_model {
  1320. e_model type = MODEL_UNKNOWN;
  1321. llm_arch arch = LLM_ARCH_UNKNOWN;
  1322. llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
  1323. std::string name = "n/a";
  1324. llama_hparams hparams = {};
  1325. llama_vocab vocab;
  1326. struct ggml_tensor * tok_embd;
  1327. struct ggml_tensor * pos_embd;
  1328. struct ggml_tensor * tok_norm;
  1329. struct ggml_tensor * tok_norm_b;
  1330. struct ggml_tensor * output_norm;
  1331. struct ggml_tensor * output_norm_b;
  1332. struct ggml_tensor * output;
  1333. struct ggml_tensor * output_b;
  1334. std::vector<llama_layer> layers;
  1335. int n_gpu_layers;
  1336. // gguf metadata
  1337. std::unordered_map<std::string, std::string> gguf_kv;
  1338. // context
  1339. struct ggml_context * ctx = NULL;
  1340. // the model memory buffer
  1341. ggml_backend_buffer_t buf = NULL;
  1342. // model memory mapped file
  1343. std::unique_ptr<llama_mmap> mapping;
  1344. // objects representing data potentially being locked in memory
  1345. llama_mlock mlock_buf;
  1346. llama_mlock mlock_mmap;
  1347. // for quantize-stats only
  1348. std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
  1349. int64_t t_load_us = 0;
  1350. int64_t t_start_us = 0;
  1351. ~llama_model() {
  1352. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  1353. if (ggml_cublas_loaded()) {
  1354. for (size_t i = 0; i < tensors_by_name.size(); ++i) {
  1355. ggml_cuda_free_data(tensors_by_name[i].second);
  1356. }
  1357. ggml_cuda_free_scratch();
  1358. }
  1359. #endif
  1360. #if defined(GGML_USE_CLBLAST)
  1361. for (size_t i = 0; i < tensors_by_name.size(); ++i) {
  1362. ggml_cl_free_data(tensors_by_name[i].second);
  1363. }
  1364. #endif
  1365. if (ctx) {
  1366. ggml_free(ctx);
  1367. }
  1368. ggml_backend_buffer_free(buf);
  1369. }
  1370. };
  1371. struct llama_context {
  1372. llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
  1373. ~llama_context() {
  1374. ggml_allocr_free(alloc);
  1375. ggml_backend_buffer_free(buf_alloc);
  1376. ggml_backend_free(backend);
  1377. }
  1378. llama_cparams cparams;
  1379. ggml_backend_t backend = nullptr;
  1380. const llama_model & model;
  1381. // key + value cache for the self attention
  1382. struct llama_kv_cache kv_self;
  1383. std::mt19937 rng;
  1384. bool has_evaluated_once = false;
  1385. int64_t t_start_us;
  1386. int64_t t_load_us;
  1387. int64_t t_sample_us = 0;
  1388. int64_t t_p_eval_us = 0;
  1389. int64_t t_eval_us = 0;
  1390. int32_t n_sample = 0; // number of tokens sampled
  1391. int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
  1392. int32_t n_eval = 0; // number of eval calls
  1393. // decode output (2-dimensional array: [n_tokens][n_vocab])
  1394. std::vector<float> logits;
  1395. #ifndef NDEBUG
  1396. // guard against access to unset logits
  1397. std::vector<bool> logits_valid;
  1398. #endif
  1399. bool logits_all = false;
  1400. // input embedding (1-dimensional array: [n_embd])
  1401. std::vector<float> embedding;
  1402. // memory buffers used to evaluate the model
  1403. std::vector<uint8_t> buf_compute_meta;
  1404. ggml_backend_buffer_t buf_alloc = NULL;
  1405. ggml_allocr * alloc = NULL;
  1406. // temporary buffer for copying data to/from the backend
  1407. std::vector<no_init<uint8_t>> buf_copy;
  1408. #ifdef GGML_USE_MPI
  1409. ggml_mpi_context * ctx_mpi = NULL;
  1410. #endif
  1411. };
  1412. //
  1413. // kv cache helpers
  1414. //
  1415. static bool llama_kv_cache_init(
  1416. const struct llama_hparams & hparams,
  1417. struct llama_kv_cache & cache,
  1418. ggml_type ktype,
  1419. ggml_type vtype,
  1420. uint32_t n_ctx,
  1421. int n_gpu_layers,
  1422. bool offload) {
  1423. const uint32_t n_embd = hparams.n_embd_gqa();
  1424. const uint32_t n_layer = hparams.n_layer;
  1425. cache.has_shift = false;
  1426. cache.head = 0;
  1427. cache.size = n_ctx;
  1428. cache.used = 0;
  1429. cache.cells.clear();
  1430. cache.cells.resize(n_ctx);
  1431. struct ggml_init_params params;
  1432. params.mem_size = 2u*n_layer*ggml_tensor_overhead();
  1433. params.mem_buffer = NULL;
  1434. params.no_alloc = true;
  1435. cache.ctx = ggml_init(params);
  1436. size_t vram_kv_cache = 0;
  1437. if (!cache.ctx) {
  1438. LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
  1439. return false;
  1440. }
  1441. cache.k_l.reserve(n_layer);
  1442. cache.v_l.reserve(n_layer);
  1443. const int i_gpu_start = (int) n_layer - n_gpu_layers;
  1444. for (int i = 0; i < (int) n_layer; i++) {
  1445. ggml_tensor * k = ggml_new_tensor_1d(cache.ctx, ktype, n_embd*n_ctx);
  1446. ggml_tensor * v = ggml_new_tensor_1d(cache.ctx, vtype, n_embd*n_ctx);
  1447. ggml_format_name(k, "cache_k_l%d", i);
  1448. ggml_format_name(v, "cache_v_l%d", i);
  1449. cache.k_l.push_back(k);
  1450. cache.v_l.push_back(v);
  1451. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  1452. if (i >= i_gpu_start) {
  1453. if (offload) {
  1454. ggml_cuda_assign_buffers_no_scratch(k);
  1455. ggml_cuda_assign_buffers_no_scratch(v);
  1456. vram_kv_cache += ggml_nbytes(k);
  1457. vram_kv_cache += ggml_nbytes(v);
  1458. // HACK: mark tensor as allocated
  1459. k->data = v->data = (void *)(uintptr_t)1;
  1460. }
  1461. }
  1462. #endif // GGML_USE_CUBLAS
  1463. }
  1464. // allocate tensors
  1465. cache.buf = ggml_backend_alloc_ctx_tensors_from_buft(cache.ctx, llama_default_buffer_type(n_gpu_layers));
  1466. // buf may be NULL with full offload
  1467. if (cache.buf) {
  1468. // initialize the buffer to avoid NaNs in the padding
  1469. ggml_backend_buffer_clear(cache.buf, 0);
  1470. }
  1471. if (vram_kv_cache > 0) {
1472. LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MiB\n", __func__, vram_kv_cache / 1024.0 / 1024.0);
  1473. }
  1474. GGML_UNUSED(i_gpu_start);
  1475. GGML_UNUSED(offload);
  1476. return true;
  1477. }
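// Call sketch: a context with n_ctx = 4096 and f16 K/V cells would initialize the
// cache roughly like this (n_gpu_layers and offload depend on the build and user
// settings):
//
//   llama_kv_cache cache;
//   llama_kv_cache_init(model.hparams, cache, GGML_TYPE_F16, GGML_TYPE_F16,
//                       /*n_ctx=*/4096, /*n_gpu_layers=*/0, /*offload=*/false);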
  1478. // find an empty slot of size "n_tokens" in the cache
  1479. // updates the cache head
  1480. // Note: On success, it's important that cache.head points
  1481. // to the first cell of the slot.
  1482. static bool llama_kv_cache_find_slot(
  1483. struct llama_kv_cache & cache,
  1484. const struct llama_batch & batch) {
  1485. const uint32_t n_ctx = cache.size;
  1486. const uint32_t n_tokens = batch.n_tokens;
  1487. if (n_tokens > n_ctx) {
  1488. LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
  1489. return false;
  1490. }
  1491. uint32_t n_tested = 0;
  1492. while (true) {
  1493. if (cache.head + n_tokens > n_ctx) {
  1494. n_tested += n_ctx - cache.head;
  1495. cache.head = 0;
  1496. continue;
  1497. }
  1498. bool found = true;
  1499. for (uint32_t i = 0; i < n_tokens; i++) {
  1500. if (cache.cells[cache.head + i].pos >= 0) {
  1501. found = false;
  1502. cache.head += i + 1;
  1503. n_tested += i + 1;
  1504. break;
  1505. }
  1506. }
  1507. if (found) {
  1508. break;
  1509. }
  1510. if (n_tested >= n_ctx) {
  1511. //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
  1512. return false;
  1513. }
  1514. }
  1515. for (uint32_t i = 0; i < n_tokens; i++) {
  1516. cache.cells[cache.head + i].pos = batch.pos[i];
  1517. for (int32_t j = 0; j < batch.n_seq_id[i]; j++) {
  1518. cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i][j]);
  1519. }
  1520. }
  1521. cache.used += n_tokens;
  1522. return true;
  1523. }
  1524. // find how many cells are currently in use
  1525. static int32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
  1526. for (uint32_t i = cache.size - 1; i > 0; --i) {
  1527. if (cache.cells[i].pos >= 0 && !cache.cells[i].seq_id.empty()) {
  1528. return i + 1;
  1529. }
  1530. }
  1531. return 0;
  1532. }
  1533. static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
  1534. for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
  1535. cache.cells[i].pos = -1;
  1536. cache.cells[i].seq_id.clear();
  1537. }
  1538. cache.head = 0;
  1539. cache.used = 0;
  1540. }
  1541. static void llama_kv_cache_seq_rm(
  1542. struct llama_kv_cache & cache,
  1543. llama_seq_id seq_id,
  1544. llama_pos p0,
  1545. llama_pos p1) {
  1546. uint32_t new_head = cache.size;
  1547. if (p0 < 0) p0 = 0;
  1548. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1549. for (uint32_t i = 0; i < cache.size; ++i) {
  1550. if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1551. if (seq_id < 0) {
  1552. cache.cells[i].seq_id.clear();
  1553. } else if (cache.cells[i].has_seq_id(seq_id)) {
  1554. cache.cells[i].seq_id.erase(seq_id);
  1555. } else {
  1556. continue;
  1557. }
  1558. if (cache.cells[i].seq_id.empty()) {
  1559. // keep count of the number of used cells
  1560. if (cache.cells[i].pos >= 0) cache.used--;
  1561. cache.cells[i].pos = -1;
  1562. if (new_head == cache.size) new_head = i;
  1563. }
  1564. }
  1565. }
  1566. // If we freed up a slot, set head to it so searching can start there.
  1567. if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
  1568. }
  1569. static void llama_kv_cache_seq_cp(
  1570. struct llama_kv_cache & cache,
  1571. llama_seq_id seq_id_src,
  1572. llama_seq_id seq_id_dst,
  1573. llama_pos p0,
  1574. llama_pos p1) {
  1575. if (p0 < 0) p0 = 0;
  1576. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1577. cache.head = 0;
  1578. for (uint32_t i = 0; i < cache.size; ++i) {
  1579. if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1580. cache.cells[i].seq_id.insert(seq_id_dst);
  1581. }
  1582. }
  1583. }
  1584. static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
  1585. uint32_t new_head = cache.size;
  1586. for (uint32_t i = 0; i < cache.size; ++i) {
  1587. if (!cache.cells[i].has_seq_id(seq_id)) {
  1588. if (cache.cells[i].pos >= 0) cache.used--;
  1589. cache.cells[i].pos = -1;
  1590. cache.cells[i].seq_id.clear();
  1591. if (new_head == cache.size) new_head = i;
  1592. } else {
  1593. cache.cells[i].seq_id.clear();
  1594. cache.cells[i].seq_id.insert(seq_id);
  1595. }
  1596. }
  1597. // If we freed up a slot, set head to it so searching can start there.
  1598. if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
  1599. }
  1600. static void llama_kv_cache_seq_shift(
  1601. struct llama_kv_cache & cache,
  1602. llama_seq_id seq_id,
  1603. llama_pos p0,
  1604. llama_pos p1,
  1605. llama_pos delta) {
  1606. uint32_t new_head = cache.size;
  1607. if (p0 < 0) p0 = 0;
  1608. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1609. for (uint32_t i = 0; i < cache.size; ++i) {
  1610. if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1611. cache.has_shift = true;
  1612. cache.cells[i].pos += delta;
  1613. cache.cells[i].delta += delta;
  1614. if (cache.cells[i].pos < 0) {
  1615. if (!cache.cells[i].seq_id.empty()) cache.used--;
  1616. cache.cells[i].pos = -1;
  1617. cache.cells[i].seq_id.clear();
  1618. if (new_head == cache.size) new_head = i;
  1619. }
  1620. }
  1621. }
  1622. // If we freed up a slot, set head to it so searching can start there.
  1623. // Otherwise we just start the next search from the beginning.
  1624. cache.head = new_head != cache.size ? new_head : 0;
  1625. }
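// Context-shift sketch: one way a caller can drop the oldest n_discard tokens of
// sequence 0 while keeping the first n_keep is to combine seq_rm and seq_shift
// (illustrative pattern only, not a drop-in snippet):
//
//   llama_kv_cache_seq_rm   (cache, 0, n_keep,             n_keep + n_discard);
//   llama_kv_cache_seq_shift(cache, 0, n_keep + n_discard, -1, -n_discard);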
  1626. //
  1627. // model loading and saving
  1628. //
  1629. enum llama_fver {
  1630. GGUF_FILE_VERSION_V1 = 1,
  1631. GGUF_FILE_VERSION_V2 = 2,
  1632. GGUF_FILE_VERSION_V3 = 3,
  1633. };
  1634. static const char * llama_file_version_name(llama_fver version) {
  1635. switch (version) {
1636. case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until Nov 2023)";
  1637. case GGUF_FILE_VERSION_V2: return "GGUF V2";
  1638. case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
  1639. }
  1640. return "unknown";
  1641. }
  1642. static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
  1643. char buf[256];
  1644. snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
  1645. for (size_t i = 1; i < ne.size(); i++) {
  1646. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
  1647. }
  1648. return buf;
  1649. }
  1650. static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
  1651. char buf[256];
  1652. snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
  1653. for (int i = 1; i < GGML_MAX_DIMS; i++) {
  1654. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
  1655. }
  1656. return buf;
  1657. }
  1658. namespace GGUFMeta {
  1659. template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
  1660. struct GKV_Base_Type {
  1661. static constexpr gguf_type gt = gt_;
  1662. static T getter(const gguf_context * ctx, const int kid) {
  1663. return gfun(ctx, kid);
  1664. }
  1665. };
  1666. template<typename T> struct GKV_Base;
  1667. template<> struct GKV_Base<bool >: GKV_Base_Type<bool, GGUF_TYPE_BOOL, gguf_get_val_bool> {};
  1668. template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, GGUF_TYPE_UINT8, gguf_get_val_u8 > {};
  1669. template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, GGUF_TYPE_UINT16, gguf_get_val_u16 > {};
  1670. template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, GGUF_TYPE_UINT32, gguf_get_val_u32 > {};
  1671. template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, GGUF_TYPE_UINT64, gguf_get_val_u64 > {};
  1672. template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, GGUF_TYPE_INT8, gguf_get_val_i8 > {};
  1673. template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, GGUF_TYPE_INT16, gguf_get_val_i16 > {};
  1674. template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, GGUF_TYPE_INT32, gguf_get_val_i32 > {};
  1675. template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, GGUF_TYPE_INT64, gguf_get_val_i64 > {};
  1676. template<> struct GKV_Base<float >: GKV_Base_Type<float, GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
  1677. template<> struct GKV_Base<double >: GKV_Base_Type<double, GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
  1678. template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING, gguf_get_val_str > {};
  1679. template<> struct GKV_Base<std::string> {
  1680. static constexpr gguf_type gt = GGUF_TYPE_STRING;
  1681. static std::string getter(const gguf_context * ctx, const int kid) {
  1682. return gguf_get_val_str(ctx, kid);
  1683. }
  1684. };
  1685. struct ArrayInfo{
  1686. const gguf_type gt;
  1687. const size_t length;
  1688. const void * data;
  1689. };
  1690. template<> struct GKV_Base<ArrayInfo> {
  1691. public:
  1692. static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
  1693. static ArrayInfo getter(const gguf_context *ctx, const int k) {
  1694. return ArrayInfo {
  1695. gguf_get_arr_type(ctx, k),
  1696. size_t(gguf_get_arr_n(ctx, k)),
  1697. gguf_get_arr_data(ctx, k),
  1698. };
  1699. }
  1700. };
  1701. template<typename T>
  1702. class GKV: public GKV_Base<T> {
  1703. GKV() = delete;
  1704. public:
  1705. static T get_kv(const gguf_context * ctx, const int k) {
  1706. const enum gguf_type kt = gguf_get_kv_type(ctx, k);
  1707. if (kt != GKV::gt) {
  1708. throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
  1709. gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
  1710. }
  1711. return GKV::getter(ctx, k);
  1712. }
  1713. static const char * override_type_to_str(const llama_model_kv_override_type ty) {
  1714. switch (ty) {
  1715. case LLAMA_KV_OVERRIDE_BOOL: return "bool";
  1716. case LLAMA_KV_OVERRIDE_INT: return "int";
  1717. case LLAMA_KV_OVERRIDE_FLOAT: return "float";
  1718. }
  1719. return "unknown";
  1720. }
  1721. static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override *override) {
  1722. if (!override) { return false; }
  1723. if (override->tag == expected_type) {
  1724. LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
  1725. __func__, override_type_to_str(override->tag), override->key);
  1726. switch (override->tag) {
  1727. case LLAMA_KV_OVERRIDE_BOOL: {
  1728. printf("%s\n", override->bool_value ? "true" : "false");
  1729. } break;
  1730. case LLAMA_KV_OVERRIDE_INT: {
  1731. printf("%" PRId64 "\n", override->int_value);
  1732. } break;
  1733. case LLAMA_KV_OVERRIDE_FLOAT: {
  1734. printf("%.6f\n", override->float_value);
  1735. } break;
  1736. default:
  1737. // Shouldn't be possible to end up here, but just in case...
  1738. throw std::runtime_error(
  1739. format("Unsupported attempt to override %s type for metadata key %s\n",
  1740. override_type_to_str(override->tag), override->key));
  1741. }
  1742. return true;
  1743. }
  1744. LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
  1745. __func__, override->key, override_type_to_str(expected_type), override_type_to_str(override->tag));
  1746. return false;
  1747. }
  1748. template<typename OT>
  1749. static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
  1750. try_override(OT & target, const struct llama_model_kv_override *override) {
  1751. if (validate_override(LLAMA_KV_OVERRIDE_BOOL, override)) {
  1752. target = override->bool_value;
  1753. return true;
  1754. }
  1755. return false;
  1756. }
  1757. template<typename OT>
  1758. static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
  1759. try_override(OT & target, const struct llama_model_kv_override *override) {
  1760. if (validate_override(LLAMA_KV_OVERRIDE_INT, override)) {
  1761. target = override->int_value;
  1762. return true;
  1763. }
  1764. return false;
  1765. }
  1766. template<typename OT>
  1767. static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
1768. try_override(OT & target, const struct llama_model_kv_override *override) {
  1769. if (validate_override(LLAMA_KV_OVERRIDE_FLOAT, override)) {
  1770. target = override->float_value;
  1771. return true;
  1772. }
  1773. return false;
  1774. }
  1775. template<typename OT>
  1776. static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
1777. try_override(OT & target, const struct llama_model_kv_override *override) {
  1778. (void)target;
  1779. (void)override;
  1780. if (!override) { return false; }
  1781. // Currently, we should never end up here so it would be a bug if we do.
  1782. throw std::runtime_error(format("Unsupported attempt to override string type for metadata key %s\n",
  1783. override ? override->key : "NULL"));
  1784. }
  1785. static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override *override = nullptr) {
  1786. if (try_override<T>(target, override)) {
  1787. return true;
  1788. }
  1789. if (k < 0) { return false; }
  1790. target = get_kv(ctx, k);
  1791. return true;
  1792. }
  1793. static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override *override = nullptr) {
  1794. return set(ctx, gguf_find_key(ctx, key), target, override);
  1795. }
  1796. static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override *override = nullptr) {
  1797. return set(ctx, key.c_str(), target, override);
  1798. }
  1799. };
  1800. }
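// Usage sketch for GGUFMeta::GKV: read a single typed value from the gguf context,
// applying a user-supplied override first when one is present
// ("llama.context_length" is only an example key here):
//
//   uint32_t n_ctx_train = 0;
//   GGUFMeta::GKV<uint32_t>::set(ctx_gguf, "llama.context_length", n_ctx_train, /*override=*/nullptr);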
  1801. struct llama_model_loader {
  1802. int n_kv = 0;
  1803. int n_tensors = 0;
  1804. int n_created = 0;
  1805. int64_t n_elements = 0;
  1806. size_t n_bytes = 0;
  1807. bool use_mmap = false;
  1808. llama_file file;
  1809. llama_ftype ftype;
  1810. llama_fver fver;
  1811. std::unique_ptr<llama_mmap> mapping;
  1812. std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
  1813. struct gguf_context * ctx_gguf = NULL;
  1814. struct ggml_context * ctx_meta = NULL;
  1815. std::string arch_name;
  1816. LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
  1817. llama_model_loader(const std::string & fname, bool use_mmap, const struct llama_model_kv_override * param_overrides_p) : file(fname.c_str(), "rb") {
  1818. struct gguf_init_params params = {
  1819. /*.no_alloc = */ true,
  1820. /*.ctx = */ &ctx_meta,
  1821. };
  1822. if (param_overrides_p != nullptr) {
  1823. for (const struct llama_model_kv_override *p = param_overrides_p; p->key[0] != 0; p++) {
  1824. kv_overrides.insert({std::string(p->key), *p});
  1825. }
  1826. }
  1827. ctx_gguf = gguf_init_from_file(fname.c_str(), params);
  1828. if (!ctx_gguf) {
  1829. throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
  1830. }
  1831. get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
  1832. llm_kv = LLM_KV(llm_arch_from_string(arch_name));
  1833. n_kv = gguf_get_n_kv(ctx_gguf);
  1834. n_tensors = gguf_get_n_tensors(ctx_gguf);
  1835. fver = (enum llama_fver ) gguf_get_version(ctx_gguf);
  1836. for (int i = 0; i < n_tensors; i++) {
  1837. const char * name = gguf_get_tensor_name(ctx_gguf, i);
  1838. struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name);
  1839. n_elements += ggml_nelements(t);
  1840. n_bytes += ggml_nbytes(t);
  1841. }
  1842. LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
  1843. __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
  1844. // determine file type based on the number of tensors for each quantization and print meta data
  1845. // TODO: make optional
  1846. {
  1847. std::map<enum ggml_type, uint32_t> n_type;
  1848. uint32_t n_type_max = 0;
  1849. enum ggml_type type_max = GGML_TYPE_F32;
  1850. for (int i = 0; i < n_tensors; i++) {
  1851. enum ggml_type type = gguf_get_tensor_type(ctx_gguf, i);
  1852. n_type[type]++;
  1853. if (n_type_max < n_type[type]) {
  1854. n_type_max = n_type[type];
  1855. type_max = type;
  1856. }
  1857. // LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str());
  1858. }
  1859. switch (type_max) {
  1860. case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
  1861. case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
  1862. case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
  1863. case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
  1864. case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
  1865. case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
  1866. case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
  1867. case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
  1868. case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
  1869. case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
  1870. case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
  1871. case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
  1872. default:
  1873. {
  1874. LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
  1875. ftype = LLAMA_FTYPE_ALL_F32;
  1876. } break;
  1877. }
  1878. // this is a way to mark that we have "guessed" the file type
  1879. ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
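// Note: llama_model_ftype_name() further below appends " (guessed)" when this bit is set, so a file
// whose dominant tensor type is Q4_K would be reported as "Q4_K - Medium (guessed)" unless the
// general.file_type key read just below overrides the guess.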
  1880. {
  1881. const int kid = gguf_find_key(ctx_gguf, "general.file_type");
  1882. if (kid >= 0) {
  1883. ftype = (llama_ftype) gguf_get_val_u32(ctx_gguf, kid);
  1884. }
  1885. }
  1886. LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
  1887. for (int i = 0; i < n_kv; i++) {
  1888. const char * name = gguf_get_key(ctx_gguf, i);
  1889. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  1890. const std::string type_name =
  1891. type == GGUF_TYPE_ARRAY
  1892. ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx_gguf, i)), gguf_get_arr_n(ctx_gguf, i))
  1893. : gguf_type_name(type);
  1894. std::string value = gguf_kv_to_str(ctx_gguf, i);
  1895. const size_t MAX_VALUE_LEN = 40;
  1896. if (value.size() > MAX_VALUE_LEN) {
  1897. value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
  1898. }
  1899. replace_all(value, "\n", "\\n");
  1900. LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
  1901. }
  1902. // print type counts
  1903. for (auto & kv : n_type) {
  1904. if (kv.second == 0) {
  1905. continue;
  1906. }
  1907. LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
  1908. }
  1909. }
  1910. if (!llama_mmap::SUPPORTED) {
  1911. LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
  1912. use_mmap = false;
  1913. }
  1914. this->use_mmap = use_mmap;
  1915. }
  1916. ~llama_model_loader() {
  1917. if (ctx_gguf) {
  1918. gguf_free(ctx_gguf);
  1919. }
  1920. if (ctx_meta) {
  1921. ggml_free(ctx_meta);
  1922. }
  1923. }
  1924. template<typename T>
  1925. typename std::enable_if<std::is_integral<T>::value, bool>::type
  1926. get_arr_n(const std::string & key, T & result, const bool required = true) {
  1927. const int kid = gguf_find_key(ctx_gguf, key.c_str());
  1928. if (kid < 0) {
  1929. if (required) {
  1930. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  1931. }
  1932. return false;
  1933. }
  1934. struct GGUFMeta::ArrayInfo arr_info =
  1935. GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx_gguf, kid);
  1936. result = arr_info.length;
  1937. return true;
  1938. }
  1939. template<typename T>
  1940. typename std::enable_if<std::is_integral<T>::value, bool>::type
  1941. get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
  1942. return get_arr_n(llm_kv(kid), result, required);
  1943. }
  1944. template<typename T>
  1945. bool get_key(const std::string & key, T & result, const bool required = true) {
  1946. auto it = kv_overrides.find(key);
  1947. const struct llama_model_kv_override * override =
  1948. it != kv_overrides.end() ? &it->second : nullptr;
  1949. const bool found = GGUFMeta::GKV<T>::set(ctx_gguf, key, result, override);
  1950. if (required && !found) {
  1951. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  1952. }
  1953. return found;
  1954. }
  1955. template<typename T>
  1956. bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
  1957. return get_key(llm_kv(kid), result, required);
  1958. }
  1959. std::string get_arch_name() const {
  1960. return arch_name;
  1961. }
  1962. enum llm_arch get_arch() const {
  1963. return llm_kv.arch;
  1964. }
  1965. const char * get_tensor_name(int i) const {
  1966. return gguf_get_tensor_name(ctx_gguf, i);
  1967. }
  1968. struct ggml_tensor * get_tensor_meta(const char * name) const {
  1969. return ggml_get_tensor(ctx_meta, name);
  1970. }
  1971. struct ggml_tensor * get_tensor_meta(int i) const {
  1972. return get_tensor_meta(get_tensor_name(i));
  1973. }
  1974. struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend_type backend) {
  1975. struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta);
  1976. tensor->backend = backend; // TODO: ggml_set_backend
  1977. ggml_set_name(tensor, ggml_get_name(meta));
  1978. n_created++;
  1979. return tensor;
  1980. }
  1981. struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool required = true) {
  1982. struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
  1983. if (cur == NULL) {
  1984. if (!required) {
  1985. return NULL;
  1986. }
  1987. throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
  1988. }
  1989. if (backend == GGML_BACKEND_GPU_SPLIT) {
  1990. if (ne.size() == 1) {
  1991. throw std::runtime_error(format("%s: 1-dimensional tensor '%s' cannot be split on the GPU", __func__, name.c_str()));
  1992. }
  1993. }
  1994. {
  1995. bool is_ok = true;
  1996. for (size_t i = 0; i < ne.size(); ++i) {
  1997. if (ne[i] != cur->ne[i]) {
  1998. is_ok = false;
  1999. break;
  2000. }
  2001. }
  2002. if (!is_ok) {
  2003. throw std::runtime_error(
  2004. format("%s: tensor '%s' has wrong shape; expected %s, got %s",
  2005. __func__, name.c_str(),
  2006. llama_format_tensor_shape(ne).c_str(),
  2007. llama_format_tensor_shape(cur).c_str()));
  2008. }
  2009. }
  2010. return create_tensor_for(ctx, cur, backend);
  2011. }
  2012. void done_getting_tensors() const {
  2013. if (n_created != n_tensors) {
  2014. throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
  2015. }
  2016. }
  2017. size_t file_offset(const char * name) const {
  2018. const int idx = gguf_find_tensor(ctx_gguf, name);
  2019. if (idx < 0) {
  2020. throw std::runtime_error(format("%s: tensor '%s' not found in the file", __func__, name));
  2021. }
  2022. return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx);
  2023. }
  2024. void init_mapping(bool prefetch = true) {
  2025. /*
  2026. // prefetch only CPU tensors
  2027. if (use_mmap) {
  2028. size_t size_pref = 0; // prefetch
  2029. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  2030. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  2031. if (cur->backend == GGML_BACKEND_CPU) {
  2032. size_t tensor_end = gguf_get_tensor_offset(ctx_gguf, i) + ggml_nbytes(cur);
  2033. size_pref = std::max(size_pref, tensor_end);
  2034. }
  2035. }
  2036. mapping.reset(new llama_mmap(&file, gguf_get_data_offset(ctx_gguf) + size_pref, ggml_is_numa()));
  2037. }
  2038. */
  2039. // prefetch the whole file - all the data is needed anyway
  2040. if (use_mmap) {
  2041. mapping.reset(new llama_mmap(&file, prefetch ? -1 : 0, ggml_is_numa()));
  2042. }
  2043. }
  2044. // for backwards compatibility, does not support ggml-backend
  2045. void load_data_for(struct ggml_tensor * cur) const {
  2046. const size_t offs = file_offset(ggml_get_name(cur));
  2047. if (use_mmap && mapping) {
  2048. GGML_ASSERT(cur->data == nullptr);
  2049. cur->data = (uint8_t *)mapping->addr + offs;
  2050. } else {
  2051. GGML_ASSERT(cur->data != nullptr);
  2052. file.seek(offs, SEEK_SET);
  2053. file.read_raw(cur->data, ggml_nbytes(cur));
  2054. }
  2055. }
  2056. // Returns false if cancelled by progress_callback
  2057. bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) const {
  2058. size_t size_data = 0;
  2059. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  2060. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  2061. size_data += ggml_nbytes(cur);
  2062. }
  2063. if (use_mmap && buf_mmap) {
  2064. if (lmlock) {
  2065. lmlock->init(mapping->addr);
  2066. }
  2067. }
  2068. #if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST)
  2069. const bool legacy_offload = true;
  2070. #else
  2071. const bool legacy_offload = false;
  2072. #endif
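// Descriptive note: legacy_offload selects the pre-ggml-backend offload path used by the cuBLAS and
// CLBlast builds; tensors tagged with a GPU backend are handled in the "else" branch below via
// ggml_cuda_transform_tensor() / ggml_cl_transform_tensor() instead of a backend buffer.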
  2073. std::vector<no_init<uint8_t>> read_buf;
  2074. size_t size_done = 0;
  2075. size_t mmap_first = -1;
  2076. size_t mmap_last = 0;
  2077. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  2078. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  2079. GGML_ASSERT(cur); // unused tensors should have been caught by load_data already
  2080. if (progress_callback) {
  2081. if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
  2082. return false;
  2083. }
  2084. }
  2085. const size_t offs = file_offset(ggml_get_name(cur));
  2086. if (!legacy_offload || cur->backend == GGML_BACKEND_CPU) {
  2087. if (use_mmap && mapping) {
  2088. if (buf_mmap) {
  2089. ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs);
  2090. if (lmlock) {
  2091. lmlock->grow_to(offs + ggml_nbytes(cur));
  2092. }
  2093. mmap_first = std::min(mmap_first, offs);
  2094. mmap_last = std::max(mmap_last, offs + ggml_nbytes(cur));
  2095. } else {
  2096. ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur));
  2097. }
  2098. } else {
  2099. if (ggml_backend_buffer_is_host(cur->buffer)) {
  2100. file.seek(offs, SEEK_SET);
  2101. file.read_raw(cur->data, ggml_nbytes(cur));
  2102. } else {
  2103. read_buf.resize(ggml_nbytes(cur));
  2104. file.seek(offs, SEEK_SET);
  2105. file.read_raw(read_buf.data(), ggml_nbytes(cur));
  2106. ggml_backend_tensor_set(cur, read_buf.data(), 0, ggml_nbytes(cur));
  2107. }
  2108. }
  2109. } else {
  2110. // HACK: mark tensor as allocated
  2111. cur->data = (void *)(uintptr_t)1;
  2112. void * data;
  2113. if (use_mmap && mapping) {
  2114. data = (uint8_t *) mapping->addr + offs;
  2115. } else {
  2116. read_buf.resize(ggml_nbytes(cur));
  2117. file.seek(offs, SEEK_SET);
  2118. file.read_raw(read_buf.data(), ggml_nbytes(cur));
  2119. data = read_buf.data();
  2120. }
  2121. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  2122. ggml_cuda_transform_tensor(data, cur);
  2123. #elif defined(GGML_USE_CLBLAST)
  2124. GGML_ASSERT(cur->backend == GGML_BACKEND_GPU);
  2125. ggml_cl_transform_tensor(data, cur);
  2126. #else
  2127. GGML_ASSERT(!"GPU tensor without a GPU backend");
  2128. GGML_UNUSED(data);
  2129. #endif
  2130. }
  2131. size_done += ggml_nbytes(cur);
  2132. }
  2133. // unmap offloaded tensors and metadata
  2134. if (use_mmap && mapping) {
  2135. mapping->unmap_fragment(0, mmap_first);
  2136. mapping->unmap_fragment(mmap_last, mapping->size);
  2137. }
  2138. if (progress_callback) {
  2139. // Even though the model is done loading, we still honor
  2140. // cancellation since we need to free allocations.
  2141. return progress_callback(1.0f, progress_callback_user_data);
  2142. }
  2143. return true;
  2144. }
  2145. };
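// Usage sketch (illustrative only; variable names are made up): the loading flow that the
// llm_load_* helpers below implement on top of llama_model_loader.
//
//   llama_model_loader ml("model.gguf", /*use_mmap =*/ true, /*param_overrides_p =*/ nullptr);
//   llm_load_arch   (ml, model);
//   llm_load_hparams(ml, model);
//   llm_load_vocab  (ml, model);
//   // ... ml.create_tensor(...) for every weight, then:
//   ml.done_getting_tensors();
//   ml.init_mapping();
//   ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf_mmap, lmlock);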
  2146. //
  2147. // load LLaMA models
  2148. //
  2149. static std::string llama_model_arch_name(llm_arch arch) {
  2150. auto it = LLM_ARCH_NAMES.find(arch);
  2151. if (it == LLM_ARCH_NAMES.end()) {
  2152. return "unknown";
  2153. }
  2154. return it->second;
  2155. }
  2156. static std::string llama_model_ftype_name(llama_ftype ftype) {
  2157. if (ftype & LLAMA_FTYPE_GUESSED) {
  2158. return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
  2159. }
  2160. switch (ftype) {
  2161. case LLAMA_FTYPE_ALL_F32: return "all F32";
  2162. case LLAMA_FTYPE_MOSTLY_F16: return "F16";
  2163. case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
  2164. case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
  2165. case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
  2166. return "Q4_1, some F16";
  2167. case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
  2168. case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
  2169. case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
  2170. // K-quants
  2171. case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K";
  2172. case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
  2173. case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
  2174. case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
  2175. case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
  2176. case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
  2177. case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
  2178. case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
  2179. case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
  2180. default: return "unknown, may not work";
  2181. }
  2182. }
  2183. static const char * llama_model_type_name(e_model type) {
  2184. switch (type) {
  2185. case MODEL_1B: return "1B";
  2186. case MODEL_3B: return "3B";
  2187. case MODEL_7B: return "7B";
  2188. case MODEL_8B: return "8B";
  2189. case MODEL_13B: return "13B";
  2190. case MODEL_15B: return "15B";
  2191. case MODEL_30B: return "30B";
  2192. case MODEL_34B: return "34B";
  2193. case MODEL_40B: return "40B";
  2194. case MODEL_65B: return "65B";
  2195. case MODEL_70B: return "70B";
  2196. case MODEL_SMALL: return "0.1B";
  2197. case MODEL_MEDIUM: return "0.4B";
  2198. case MODEL_LARGE: return "0.8B";
  2199. case MODEL_XL: return "1.5B";
  2200. default: return "?B";
  2201. }
  2202. }
  2203. static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
  2204. model.arch = ml.get_arch();
  2205. if (model.arch == LLM_ARCH_UNKNOWN) {
  2206. throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
  2207. }
  2208. }
  2209. static void llm_load_hparams(
  2210. llama_model_loader & ml,
  2211. llama_model & model) {
  2212. auto & hparams = model.hparams;
  2213. const gguf_context * ctx = ml.ctx_gguf;
  2214. // get metadata as string
  2215. for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
  2216. enum gguf_type type = gguf_get_kv_type(ctx, i);
  2217. if (type == GGUF_TYPE_ARRAY) {
  2218. continue;
  2219. }
  2220. const char * name = gguf_get_key(ctx, i);
  2221. const std::string value = gguf_kv_to_str(ctx, i);
  2222. model.gguf_kv.emplace(name, value);
  2223. }
  2224. // get general kv
  2225. ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
  2226. // get hparams kv
  2227. ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
  2228. ml.get_key (LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
  2229. ml.get_key (LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
  2230. ml.get_key (LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff);
  2231. ml.get_key (LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head);
  2232. ml.get_key (LLM_KV_BLOCK_COUNT, hparams.n_layer);
  2233. ml.get_key (LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
  2234. ml.get_key (LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
  2235. GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
  2236. GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
  2237. if (hparams.n_expert > 0) {
  2238. GGML_ASSERT(hparams.n_expert_used > 0);
  2239. } else {
  2240. GGML_ASSERT(hparams.n_expert_used == 0);
  2241. }
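// e.g. a Mixtral-style MoE model stores n_expert = 8 and n_expert_used = 2, while dense models
// leave both at 0 (the optional keys are simply absent).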
  2242. // n_head_kv is optional, default to n_head
  2243. hparams.n_head_kv = hparams.n_head;
  2244. ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv, false);
  2245. bool rope_finetuned = false;
  2246. ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
  2247. hparams.rope_finetuned = rope_finetuned;
  2248. hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
  2249. ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_yarn_orig_ctx, false);
  2250. // rope_freq_base (optional)
  2251. hparams.rope_freq_base_train = 10000.0f;
  2252. ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
  2253. std::string rope_scaling("linear");
  2254. ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
  2255. hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
  2256. GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_UNSPECIFIED);
  2257. // rope_freq_scale (inverse of the kv) is optional
  2258. float ropescale = 0.0f;
  2259. if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
  2260. // try the old key name
  2261. ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
  2262. }
  2263. hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
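// e.g. a stored rope scaling factor of 4.0 yields rope_freq_scale_train = 0.25, while a missing or
// zero factor leaves it at 1.0 (no scaling).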
  2264. // sanity check for n_rot (optional)
  2265. {
  2266. hparams.n_rot = hparams.n_embd / hparams.n_head;
  2267. ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
  2268. if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
  2269. if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
  2270. throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
  2271. }
  2272. }
  2273. // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
  2274. // gpt-j n_rot = rotary_dim
  2275. }
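// e.g. for a 7B LLaMA model: n_embd = 4096, n_head = 32, so n_rot = 4096/32 = 128 (the per-head dimension).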
  2276. // arch-specific KVs
  2277. switch (model.arch) {
  2278. case LLM_ARCH_LLAMA:
  2279. {
  2280. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2281. switch (hparams.n_layer) {
  2282. case 22: model.type = e_model::MODEL_1B; break;
  2283. case 26: model.type = e_model::MODEL_3B; break;
  2284. case 32: model.type = e_model::MODEL_7B; break;
  2285. case 40: model.type = e_model::MODEL_13B; break;
  2286. case 48: model.type = e_model::MODEL_34B; break;
  2287. case 60: model.type = e_model::MODEL_30B; break;
  2288. case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
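// 80 layers is shared by LLaMA-65B and Llama-2-70B; the 70B model uses grouped-query attention,
// so n_head_kv != n_head distinguishes it from the 65B model.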
  2289. default: model.type = e_model::MODEL_UNKNOWN;
  2290. }
  2291. } break;
  2292. case LLM_ARCH_FALCON:
  2293. {
  2294. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2295. switch (hparams.n_layer) {
  2296. case 32: model.type = e_model::MODEL_7B; break;
  2297. case 60: model.type = e_model::MODEL_40B; break;
  2298. default: model.type = e_model::MODEL_UNKNOWN;
  2299. }
  2300. } break;
  2301. case LLM_ARCH_BAICHUAN:
  2302. {
  2303. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2304. switch (hparams.n_layer) {
  2305. case 32: model.type = e_model::MODEL_7B; break;
  2306. case 40: model.type = e_model::MODEL_13B; break;
  2307. default: model.type = e_model::MODEL_UNKNOWN;
  2308. }
  2309. } break;
  2310. case LLM_ARCH_STARCODER:
  2311. {
  2312. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2313. switch (hparams.n_layer) {
  2314. case 24: model.type = e_model::MODEL_1B; break;
  2315. case 36: model.type = e_model::MODEL_3B; break;
  2316. case 42: model.type = e_model::MODEL_7B; break;
  2317. case 40: model.type = e_model::MODEL_15B; break;
  2318. default: model.type = e_model::MODEL_UNKNOWN;
  2319. }
  2320. } break;
  2321. case LLM_ARCH_PERSIMMON:
  2322. {
  2323. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2324. switch (hparams.n_layer) {
  2325. case 36: model.type = e_model::MODEL_8B; break;
  2326. default: model.type = e_model::MODEL_UNKNOWN;
  2327. }
  2328. } break;
  2329. case LLM_ARCH_REFACT:
  2330. {
  2331. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2332. switch (hparams.n_layer) {
  2333. case 32: model.type = e_model::MODEL_1B; break;
  2334. default: model.type = e_model::MODEL_UNKNOWN;
  2335. }
  2336. } break;
  2337. case LLM_ARCH_BLOOM:
  2338. {
  2339. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2340. switch (hparams.n_layer) {
  2341. case 24: model.type = e_model::MODEL_1B; break;
  2342. case 30:
  2343. switch (hparams.n_embd) {
  2344. case 2560: model.type = e_model::MODEL_3B; break;
  2345. case 4096: model.type = e_model::MODEL_7B; break;
  2346. } break;
  2347. }
  2348. } break;
  2349. case LLM_ARCH_MPT:
  2350. {
  2351. hparams.f_clamp_kqv = 0.0f;
  2352. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2353. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  2354. ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
  2355. switch (hparams.n_layer) {
  2356. case 32: model.type = e_model::MODEL_7B; break;
  2357. case 48: model.type = e_model::MODEL_30B; break;
  2358. default: model.type = e_model::MODEL_UNKNOWN;
  2359. }
  2360. } break;
  2361. case LLM_ARCH_STABLELM:
  2362. {
  2363. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2364. switch (hparams.n_layer) {
  2365. case 32: model.type = e_model::MODEL_3B; break;
  2366. default: model.type = e_model::MODEL_UNKNOWN;
  2367. }
  2368. } break;
  2369. case LLM_ARCH_QWEN:
  2370. {
  2371. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2372. switch (hparams.n_layer) {
  2373. case 32: model.type = e_model::MODEL_7B; break;
  2374. case 40: model.type = e_model::MODEL_13B; break;
  2375. default: model.type = e_model::MODEL_UNKNOWN;
  2376. }
  2377. } break;
  2378. case LLM_ARCH_PHI2:
  2379. {
  2380. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2381. switch (hparams.n_layer) {
  2382. case 32: model.type = e_model::MODEL_3B; break;
  2383. default: model.type = e_model::MODEL_UNKNOWN;
  2384. }
  2385. } break;
  2386. case LLM_ARCH_PLAMO:
  2387. {
  2388. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2389. switch (hparams.n_layer) {
  2390. case 40: model.type = e_model::MODEL_13B; break;
  2391. default: model.type = e_model::MODEL_UNKNOWN;
  2392. }
  2393. } break;
  2394. case LLM_ARCH_GPT2:
  2395. {
  2396. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2397. switch (hparams.n_layer) {
  2398. case 12: model.type = e_model::MODEL_SMALL; break;
  2399. case 24: model.type = e_model::MODEL_MEDIUM; break;
  2400. case 36: model.type = e_model::MODEL_LARGE; break;
  2401. case 48: model.type = e_model::MODEL_XL; break;
  2402. default: model.type = e_model::MODEL_UNKNOWN;
  2403. }
  2404. } break;
  2405. default: (void)0;
  2406. }
  2407. model.ftype = ml.ftype;
  2408. }
  2409. // TODO: This should probably be in llama.h
  2410. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special = false);
  2411. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);
  2412. static void llm_load_vocab(
  2413. llama_model_loader & ml,
  2414. llama_model & model) {
  2415. auto & vocab = model.vocab;
  2416. struct gguf_context * ctx = ml.ctx_gguf;
  2417. const auto kv = LLM_KV(model.arch);
  2418. const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
  2419. if (token_idx == -1) {
  2420. throw std::runtime_error("cannot find tokenizer vocab in model file\n");
  2421. }
  2422. const float * scores = nullptr;
  2423. const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
  2424. if (score_idx != -1) {
  2425. scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
  2426. }
  2427. const int * toktypes = nullptr;
  2428. const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
  2429. if (toktype_idx != -1) {
  2430. toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
  2431. }
  2432. // determine vocab type
  2433. {
  2434. std::string tokenizer_name;
  2435. ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_name);
  2436. if (tokenizer_name == "llama") {
  2437. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  2438. // default special tokens
  2439. vocab.special_bos_id = 1;
  2440. vocab.special_eos_id = 2;
  2441. vocab.special_unk_id = 0;
  2442. vocab.special_sep_id = -1;
  2443. vocab.special_pad_id = -1;
  2444. } else if (tokenizer_name == "gpt2") {
  2445. vocab.type = LLAMA_VOCAB_TYPE_BPE;
  2446. // read bpe merges and populate bpe ranks
  2447. const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
  2448. if (merges_keyidx == -1) {
  2449. throw std::runtime_error("cannot find tokenizer merges in model file\n");
  2450. }
  2451. const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
  2452. for (int i = 0; i < n_merges; i++) {
  2453. const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
  2454. GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
  2455. std::string first;
  2456. std::string second;
  2457. const size_t pos = word.find(' ', 1);
  2458. if (pos != std::string::npos) {
  2459. first = word.substr(0, pos);
  2460. second = word.substr(pos + 1);
  2461. }
  2462. vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
  2463. }
  2464. // default special tokens
  2465. vocab.special_bos_id = 11;
  2466. vocab.special_eos_id = 11;
  2467. vocab.special_unk_id = -1;
  2468. vocab.special_sep_id = -1;
  2469. vocab.special_pad_id = -1;
  2470. } else {
  2471. LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
  2472. LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
  2473. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  2474. }
  2475. }
  2476. const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
  2477. vocab.id_to_token.resize(n_vocab);
  2478. for (uint32_t i = 0; i < n_vocab; i++) {
  2479. std::string word = gguf_get_arr_str(ctx, token_idx, i);
  2480. GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
  2481. vocab.token_to_id[word] = i;
  2482. auto & token_data = vocab.id_to_token[i];
  2483. token_data.text = std::move(word);
  2484. token_data.score = scores ? scores[i] : 0.0f;
  2485. token_data.type = toktypes ? (llama_token_type) toktypes[i] : LLAMA_TOKEN_TYPE_NORMAL;
  2486. }
  2487. GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
  2488. // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
  2489. if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
  2490. vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
  2491. } else {
  2492. const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
  2493. GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
  2494. vocab.linefeed_id = ids[0];
  2495. }
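// Note (assumption about the escape above): "\u010A" is 'Ċ' (U+010A), which is how the byte 0x0A ('\n')
// is represented in GPT-2 style byte-level BPE vocabularies, so tokenizing it recovers the newline token.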
  2496. // special tokens
  2497. {
  2498. const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
  2499. { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
  2500. { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
  2501. { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
  2502. { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
  2503. { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
  2504. };
  2505. for (const auto & it : special_token_types) {
  2506. const std::string & key = kv(std::get<0>(it));
  2507. int32_t & id = std::get<1>(it);
  2508. uint32_t new_id;
  2509. if (!ml.get_key(std::get<0>(it), new_id, false)) {
  2510. continue;
  2511. }
  2512. if (new_id >= vocab.id_to_token.size()) {
2513. LLAMA_LOG_WARN("%s: bad special token: '%s' = %u, using default id %d\n",
  2514. __func__, key.c_str(), new_id, id);
  2515. } else {
  2516. id = new_id;
  2517. }
  2518. }
  2519. // Handle add_bos_token and add_eos_token
  2520. {
  2521. bool temp = true;
  2522. if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
  2523. vocab.special_add_bos = int(temp);
  2524. }
  2525. if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
  2526. vocab.special_add_eos = int(temp);
  2527. }
  2528. }
  2529. }
  2530. // build special tokens cache
  2531. {
2532. // TODO: It is unclear (to me) at this point whether special tokens are guaranteed to be of a deterministic type,
2533. // and will always be correctly labeled in 'added_tokens.json' etc.
2534. // The assumption is that, since special tokens aren't meant to be exposed to the end user, they are designed
2535. // to be unmatchable by the tokenizer; therefore any token in the vocab which the tokenizer cannot match
2536. // is a special token.
  2537. // From testing, this appears to correlate 1:1 with special tokens.
  2538. //
  2539. // Counting special tokens and verifying in only one direction
  2540. // is sufficient to detect difference in those two sets.
  2541. //
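// For example (illustrative tokens): a marker such as "<|endoftext|>" cannot be reassembled from two
// other vocab entries at any split point, while an ordinary merged token like "ing" usually can
// ("i" + "ng"), so the split test below flags only the former as special.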
  2542. uint32_t special_tokens_count_by_type = 0;
  2543. uint32_t special_tokens_count_from_verification = 0;
  2544. bool special_tokens_definition_mismatch = false;
  2545. for (const auto & t : vocab.token_to_id) {
  2546. const auto & token = t.first;
  2547. const auto & id = t.second;
  2548. // Count all non-normal tokens in the vocab while iterating
  2549. if (vocab.id_to_token[id].type != LLAMA_TOKEN_TYPE_NORMAL) {
  2550. special_tokens_count_by_type++;
  2551. }
  2552. // Skip single character tokens
  2553. if (token.length() > 1) {
  2554. bool is_tokenizable = false;
  2555. // Split token string representation in two, in all possible ways
  2556. // and check if both halves can be matched to a valid token
  2557. for (unsigned i = 1; i < token.length();) {
  2558. const auto left = token.substr(0, i);
  2559. const auto right = token.substr(i);
2560. // check if we didn't partition in the middle of a UTF-8 sequence
  2561. auto utf = utf8_len(left.at(left.length() - 1));
  2562. if (utf == 1) {
  2563. if (vocab.token_to_id.find(left) != vocab.token_to_id.end() &&
  2564. vocab.token_to_id.find(right) != vocab.token_to_id.end() ) {
  2565. is_tokenizable = true;
  2566. break;
  2567. }
  2568. i++;
  2569. } else {
  2570. // skip over the rest of multibyte utf sequence
  2571. i += utf - 1;
  2572. }
  2573. }
  2574. if (!is_tokenizable) {
2575. // Some tokens are multibyte, but their UTF-8 sequences decode to a text length of 1;
2576. // it's faster to re-filter them here, since there are far fewer candidates now
  2577. // Calculate a total "utf" length of a token string representation
  2578. size_t utf8_str_len = 0;
  2579. for (unsigned i = 0; i < token.length();) {
  2580. utf8_str_len++;
  2581. i += utf8_len(token.at(i));
  2582. }
  2583. // And skip the ones which are one character
  2584. if (utf8_str_len > 1) {
  2585. // At this point what we have left are special tokens only
  2586. vocab.special_tokens_cache[token] = id;
  2587. // Count manually found special tokens
  2588. special_tokens_count_from_verification++;
  2589. // If this manually found special token is not marked as such, flag a mismatch
  2590. if (vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL) {
  2591. special_tokens_definition_mismatch = true;
  2592. }
  2593. }
  2594. }
  2595. }
  2596. }
  2597. if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
  2598. LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
  2599. __func__,
  2600. special_tokens_count_from_verification, vocab.id_to_token.size(),
  2601. special_tokens_count_by_type, vocab.id_to_token.size()
  2602. );
  2603. } else {
  2604. LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
  2605. __func__,
  2606. special_tokens_count_from_verification, vocab.id_to_token.size()
  2607. );
  2608. }
  2609. }
  2610. }
  2611. static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
  2612. const auto & hparams = model.hparams;
  2613. const auto & vocab = model.vocab;
  2614. const auto rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
  2615. // hparams
  2616. LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
  2617. LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch).c_str());
  2618. LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix
  2619. LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
  2620. LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
  2621. LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
  2622. LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
  2623. LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
  2624. LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
  2625. LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
  2626. LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
  2627. LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
  2628. LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
  2629. LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
  2630. LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
  2631. LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
  2632. LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
  2633. LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
  2634. LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
  2635. LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type.c_str());
  2636. LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
  2637. LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
  2638. LLAMA_LOG_INFO("%s: n_yarn_orig_ctx = %u\n", __func__, hparams.n_yarn_orig_ctx);
  2639. LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
  2640. LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
  2641. LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
  2642. LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
  2643. if (ml.n_bytes < GiB) {
  2644. LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  2645. } else {
  2646. LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  2647. }
  2648. // general kv
  2649. LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
  2650. // special tokens
  2651. if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
  2652. if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
  2653. if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
  2654. if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
  2655. if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
  2656. if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
  2657. }
  2658. // Returns false if cancelled by progress_callback
  2659. static bool llm_load_tensors(
  2660. llama_model_loader & ml,
  2661. llama_model & model,
  2662. int n_gpu_layers,
  2663. int main_gpu,
  2664. const float * tensor_split,
  2665. bool use_mlock,
  2666. llama_progress_callback progress_callback,
  2667. void * progress_callback_user_data) {
  2668. model.t_start_us = ggml_time_us();
  2669. auto & ctx = model.ctx;
  2670. auto & hparams = model.hparams;
  2671. model.n_gpu_layers = n_gpu_layers;
  2672. size_t ctx_size = ggml_tensor_overhead() * ml.n_tensors;
  2673. LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, ctx_size/1024.0/1024.0);
  2674. // create the ggml context
  2675. {
  2676. struct ggml_init_params params = {
  2677. /*.mem_size =*/ ctx_size,
  2678. /*.mem_buffer =*/ NULL,
  2679. /*.no_alloc =*/ true,
  2680. };
  2681. model.ctx = ggml_init(params);
  2682. if (!model.ctx) {
  2683. throw std::runtime_error(format("ggml_init() failed"));
  2684. }
  2685. }
  2686. (void) main_gpu;
  2687. enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU;
  2688. enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU;
  2689. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  2690. if (ggml_cublas_loaded()) {
  2691. LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__);
  2692. ggml_cuda_set_main_device(main_gpu);
  2693. llama_backend_offload = GGML_BACKEND_GPU;
  2694. llama_backend_offload_split = GGML_BACKEND_GPU_SPLIT;
  2695. }
  2696. #elif defined(GGML_USE_CLBLAST)
  2697. LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
  2698. llama_backend_offload = GGML_BACKEND_GPU;
  2699. llama_backend_offload_split = GGML_BACKEND_GPU;
  2700. #endif
  2701. // create tensors for the weights
  2702. {
  2703. const int64_t n_embd = hparams.n_embd;
  2704. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  2705. const int64_t n_layer = hparams.n_layer;
  2706. const int64_t n_vocab = hparams.n_vocab;
  2707. const auto tn = LLM_TN(model.arch);
  2708. switch (model.arch) {
  2709. case LLM_ARCH_LLAMA:
  2710. case LLM_ARCH_REFACT:
  2711. {
  2712. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2713. // output
  2714. {
  2715. ggml_backend_type backend_norm;
  2716. ggml_backend_type backend_output;
  2717. if (n_gpu_layers > int(n_layer)) {
  2718. backend_norm = llama_backend_offload;
  2719. backend_output = llama_backend_offload_split;
  2720. } else {
  2721. backend_norm = GGML_BACKEND_CPU;
  2722. backend_output = GGML_BACKEND_CPU;
  2723. }
  2724. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2725. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2726. }
  2727. const uint32_t n_ff = hparams.n_ff;
  2728. const int i_gpu_start = n_layer - n_gpu_layers;
  2729. model.layers.resize(n_layer);
  2730. for (uint32_t i = 0; i < n_layer; ++i) {
  2731. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2732. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2733. auto & layer = model.layers[i];
  2734. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2735. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  2736. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2737. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2738. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2739. // optional bias tensors
  2740. layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend, false);
  2741. layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend, false);
  2742. layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend, false);
  2743. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend, false);
  2744. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2745. layer.ffn_gate_inp = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd}, backend, false);
  2746. if (layer.ffn_gate_inp == nullptr) {
  2747. GGML_ASSERT(hparams.n_expert == 0);
  2748. GGML_ASSERT(hparams.n_expert_used == 0);
  2749. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  2750. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2751. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2752. } else {
  2753. GGML_ASSERT(hparams.n_expert > 0);
  2754. GGML_ASSERT(hparams.n_expert_used > 0);
  2755. // MoE branch
  2756. for (uint32_t x = 0; x < hparams.n_expert; ++x) {
  2757. layer.ffn_gate_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), {n_embd, n_ff}, backend_split);
  2758. layer.ffn_down_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd}, backend_split);
  2759. layer.ffn_up_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), {n_embd, n_ff}, backend_split);
  2760. }
  2761. }
  2762. }
  2763. } break;
  2764. case LLM_ARCH_BAICHUAN:
  2765. {
  2766. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2767. {
  2768. ggml_backend_type backend_norm;
  2769. ggml_backend_type backend_output;
  2770. if (n_gpu_layers > int(n_layer)) {
  2771. backend_norm = llama_backend_offload;
  2772. backend_output = llama_backend_offload_split;
  2773. } else {
  2774. backend_norm = GGML_BACKEND_CPU;
  2775. backend_output = GGML_BACKEND_CPU;
  2776. }
  2777. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2778. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2779. }
  2780. const uint32_t n_ff = hparams.n_ff;
  2781. const int i_gpu_start = n_layer - n_gpu_layers;
  2782. model.layers.resize(n_layer);
  2783. for (uint32_t i = 0; i < n_layer; ++i) {
  2784. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2785. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2786. auto & layer = model.layers[i];
  2787. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2788. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  2789. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2790. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2791. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2792. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2793. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  2794. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2795. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2796. }
  2797. } break;
  2798. case LLM_ARCH_FALCON:
  2799. {
  2800. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2801. // output
  2802. {
  2803. ggml_backend_type backend_norm;
  2804. ggml_backend_type backend_output;
  2805. if (n_gpu_layers > int(n_layer)) {
  2806. backend_norm = llama_backend_offload;
  2807. backend_output = llama_backend_offload_split;
  2808. } else {
  2809. backend_norm = GGML_BACKEND_CPU;
  2810. backend_output = GGML_BACKEND_CPU;
  2811. }
  2812. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2813. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2814. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2815. }
  2816. const uint32_t n_ff = hparams.n_ff;
  2817. const int i_gpu_start = n_layer - n_gpu_layers;
  2818. model.layers.resize(n_layer);
  2819. for (uint32_t i = 0; i < n_layer; ++i) {
  2820. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2821. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2822. auto & layer = model.layers[i];
  2823. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2824. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2825. if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) {
  2826. layer.attn_norm_2 = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, backend);
  2827. layer.attn_norm_2_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, backend);
  2828. }
  2829. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2830. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2831. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2832. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2833. }
  2834. } break;
  2835. case LLM_ARCH_STARCODER:
  2836. {
  2837. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2838. model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU);
  2839. // output
  2840. {
  2841. ggml_backend_type backend_norm;
  2842. ggml_backend_type backend_output;
  2843. if (n_gpu_layers > int(n_layer)) {
  2844. backend_norm = llama_backend_offload;
  2845. backend_output = llama_backend_offload_split;
  2846. } else {
  2847. backend_norm = GGML_BACKEND_CPU;
  2848. backend_output = GGML_BACKEND_CPU;
  2849. }
  2850. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2851. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2852. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2853. }
  2854. const uint32_t n_ff = hparams.n_ff;
  2855. const int i_gpu_start = n_layer - n_gpu_layers;
  2856. model.layers.resize(n_layer);
  2857. for (uint32_t i = 0; i < n_layer; ++i) {
  2858. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2859. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2860. auto & layer = model.layers[i];
  2861. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2862. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2863. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2864. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  2865. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2866. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  2867. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2868. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2869. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  2870. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  2871. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2872. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  2873. }
  2874. } break;
  2875. case LLM_ARCH_PERSIMMON:
  2876. {
  2877. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2878. {
  2879. ggml_backend_type backend_norm;
  2880. ggml_backend_type backend_output;
  2881. if (n_gpu_layers > int(n_layer)) {
  2882. backend_norm = llama_backend_offload;
  2883. backend_output = llama_backend_offload_split;
  2884. } else {
  2885. backend_norm = GGML_BACKEND_CPU;
  2886. backend_output = GGML_BACKEND_CPU;
  2887. }
  2888. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2889. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2890. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2891. }
  2892. const uint32_t n_ff = hparams.n_ff;
  2893. const int i_gpu_start = n_layer - n_gpu_layers;
  2894. model.layers.resize(n_layer);
  2895. for (uint32_t i = 0; i < n_layer; ++i) {
  2896. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload;
  2897. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split;
  2898. auto & layer = model.layers[i];
  2899. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2900. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2901. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2902. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  2903. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2904. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  2905. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  2906. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  2907. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2908. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  2909. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2910. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2911. layer.attn_q_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}, backend);
  2912. layer.attn_q_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}, backend);
  2913. layer.attn_k_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64}, backend);
  2914. layer.attn_k_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}, backend);
  2915. }
  2916. } break;
  2917. case LLM_ARCH_BLOOM:
  2918. {
  2919. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2920. model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU);
  2921. model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU);
  2922. // output
  2923. {
  2924. ggml_backend_type backend_norm;
  2925. ggml_backend_type backend_output;
  2926. if (n_gpu_layers > int(n_layer)) {
  2927. backend_norm = llama_backend_offload;
  2928. backend_output = llama_backend_offload_split;
  2929. } else {
  2930. backend_norm = GGML_BACKEND_CPU;
  2931. backend_output = GGML_BACKEND_CPU;
  2932. }
  2933. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2934. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2935. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2936. }
  2937. const uint32_t n_ff = hparams.n_ff;
  2938. const int i_gpu_start = n_layer - n_gpu_layers;
  2939. model.layers.resize(n_layer);
  2940. for (uint32_t i = 0; i < n_layer; ++i) {
  2941. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2942. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2943. auto & layer = model.layers[i];
  2944. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2945. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2946. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2947. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  2948. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2949. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  2950. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2951. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2952. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  2953. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  2954. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2955. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  2956. }
  2957. } break;
  2958. case LLM_ARCH_MPT:
  2959. {
  2960. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2961. // output
  2962. {
  2963. ggml_backend_type backend_norm;
  2964. ggml_backend_type backend_output;
  2965. if (n_gpu_layers > int(n_layer)) {
  2966. backend_norm = llama_backend_offload;
  2967. backend_output = llama_backend_offload_split;
  2968. } else {
  2969. backend_norm = GGML_BACKEND_CPU;
  2970. backend_output = GGML_BACKEND_CPU;
  2971. }
  2972. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2973. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2974. }
  2975. const uint32_t n_ff = hparams.n_ff;
  2976. const int i_gpu_start = n_layer - n_gpu_layers;
  2977. model.layers.resize(n_layer);
  2978. for (uint32_t i = 0; i < n_layer; ++i) {
  2979. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2980. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2981. auto & layer = model.layers[i];
  2982. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2983. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2984. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2985. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2986. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2987. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2988. // AWQ ScaleActivation layer
  2989. layer.ffn_act = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, backend, false);
  2990. }
  2991. } break;
  2992. case LLM_ARCH_STABLELM:
  2993. {
  2994. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2995. // output
  2996. {
  2997. ggml_backend_type backend_norm;
  2998. ggml_backend_type backend_output;
  2999. if (n_gpu_layers > int(n_layer)) {
  3000. backend_norm = llama_backend_offload;
  3001. backend_output = llama_backend_offload_split;
  3002. } else {
  3003. backend_norm = GGML_BACKEND_CPU;
  3004. backend_output = GGML_BACKEND_CPU;
  3005. }
  3006. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  3007. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3008. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3009. }
  3010. const uint32_t n_ff = hparams.n_ff;
  3011. const int i_gpu_start = n_layer - n_gpu_layers;
  3012. model.layers.resize(n_layer);
  3013. for (uint32_t i = 0; i < n_layer; ++i) {
  3014. // reference from llama_model_loader: tensor 4: blk.0.attn_output.weight f16 [ 2560, 2560, 1, 1 ]
  3017. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3018. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3019. auto & layer = model.layers[i];
  3020. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3021. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  3022. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  3023. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  3024. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  3025. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3026. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  3027. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  3028. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  3029. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  3030. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3031. }
  3032. } break;
  3033. case LLM_ARCH_QWEN:
  3034. {
  3035. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3036. {
  3037. ggml_backend_type backend_norm;
  3038. ggml_backend_type backend_output;
  3039. if (n_gpu_layers > int(n_layer)) {
  3040. backend_norm = llama_backend_offload;
  3041. backend_output = llama_backend_offload_split;
  3042. } else {
  3043. backend_norm = GGML_BACKEND_CPU;
  3044. backend_output = GGML_BACKEND_CPU;
  3045. }
  3046. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3047. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3048. }
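// note: hparams.n_ff for Qwen appears to hold twice the per-projection FFN width, hence the division by 2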
  3049. const uint32_t n_ff = hparams.n_ff / 2;
  3050. const int i_gpu_start = n_layer - n_gpu_layers;
  3051. model.layers.resize(n_layer);
  3052. for (uint32_t i = 0; i < n_layer; ++i) {
  3053. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3054. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3055. auto & layer = model.layers[i];
  3056. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3057. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd * 3}, backend_split);
  3058. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd * 3}, backend);
  3059. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3060. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  3061. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  3062. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  3063. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3064. }
  3065. } break;
  3066. case LLM_ARCH_PHI2:
  3067. {
  3068. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3069. // output
  3070. {
  3071. ggml_backend_type backend_norm;
  3072. ggml_backend_type backend_output;
  3073. if (n_gpu_layers > int(n_layer)) {
  3074. backend_norm = llama_backend_offload;
  3075. backend_output = llama_backend_offload;
  3076. } else {
  3077. backend_norm = GGML_BACKEND_CPU;
  3078. backend_output = GGML_BACKEND_CPU;
  3079. }
  3080. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3081. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  3082. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3083. model.output_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, backend_output);
  3084. }
  3085. const uint32_t n_ff = hparams.n_ff;
  3086. const int i_gpu_start = n_layer - n_gpu_layers;
  3087. model.layers.resize(n_layer);
  3088. for (uint32_t i = 0; i < n_layer; ++i) {
  3089. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3090. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3091. auto & layer = model.layers[i];
  3092. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3093. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  3094. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  3095. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  3096. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3097. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  3098. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  3099. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  3100. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3101. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  3102. }
  3103. } break;
  3104. case LLM_ARCH_PLAMO:
  3105. {
  3106. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3107. // output
  3108. {
  3109. ggml_backend_type backend_norm;
  3110. ggml_backend_type backend_output;
  3111. if (n_gpu_layers > int(n_layer)) {
  3112. backend_norm = llama_backend_offload;
  3113. backend_output = llama_backend_offload_split;
  3114. } else {
  3115. backend_norm = GGML_BACKEND_CPU;
  3116. backend_output = GGML_BACKEND_CPU;
  3117. }
  3118. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3119. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3120. }
  3121. const uint32_t n_ff = hparams.n_ff;
  3122. const int i_gpu_start = n_layer - n_gpu_layers;
  3123. model.layers.resize(n_layer);
  3124. for (uint32_t i = 0; i < n_layer; ++i) {
  3125. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3126. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3127. auto & layer = model.layers[i];
  3128. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3129. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  3130. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  3131. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  3132. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3133. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  3134. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  3135. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3136. }
  3137. } break;
  3138. case LLM_ARCH_GPT2:
  3139. {
  3140. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3141. model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU);
  3142. // output
  3143. {
  3144. ggml_backend_type backend_norm;
  3145. ggml_backend_type backend_output;
  3146. if (n_gpu_layers > int(n_layer)) {
  3147. backend_norm = llama_backend_offload;
  3148. backend_output = llama_backend_offload_split;
  3149. } else {
  3150. backend_norm = GGML_BACKEND_CPU;
  3151. backend_output = GGML_BACKEND_CPU;
  3152. }
  3153. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3154. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  3155. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3156. }
  3157. const uint32_t n_ff = hparams.n_ff;
  3158. const int i_gpu_start = n_layer - n_gpu_layers;
  3159. model.layers.resize(n_layer);
  3160. for (uint32_t i = 0; i < n_layer; ++i) {
  3161. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3162. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3163. auto & layer = model.layers[i];
  3164. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3165. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  3166. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  3167. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  3168. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3169. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  3170. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  3171. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  3172. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  3173. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  3174. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3175. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  3176. }
  3177. } break;
  3178. default:
  3179. throw std::runtime_error("unknown architecture");
  3180. }
  3181. }
  3182. ml.done_getting_tensors();
  3183. ml.init_mapping();
  3184. // allocate tensors
  3185. size_t vram_weights = 0;
  3186. size_t buf_size = 0;
  3187. ggml_backend_buffer_type_t buft = llama_default_buffer_type(n_gpu_layers);
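// tally memory per backend: CPU tensors are summed into buf_size (padded to the buffer alignment), all other tensors are counted as VRAM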
  3188. for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
  3189. // GGML_BACKEND_GPU tensors are for CUDA and OpenCL only, which are handled separately without ggml-backend
  3190. if (t->backend == GGML_BACKEND_CPU) {
  3191. buf_size += GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), ggml_backend_buft_get_alignment(buft));
  3192. } else {
  3193. vram_weights += ggml_nbytes(t);
  3194. }
  3195. }
  3196. // create backend buffer
  3197. ggml_backend_buffer_t buf_mmap = nullptr;
  3198. #ifdef GGML_USE_METAL
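// with mmap, wrap the mapped file in a Metal buffer (no copy); otherwise allocate a dedicated Metal buffer for the tensors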
  3199. if (n_gpu_layers > 0) {
  3200. if (ml.use_mmap) {
  3201. const size_t max_size = ggml_get_max_tensor_size(ctx);
  3202. model.buf = ggml_backend_metal_buffer_from_ptr(ml.mapping->addr, ml.mapping->size, max_size);
  3203. buf_mmap = model.buf;
  3204. } else {
  3205. model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_metal_buffer_type());
  3206. }
  3207. }
  3208. #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  3209. // for testing only
  3210. if (n_gpu_layers > 0) {
  3211. model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cuda_buffer_type(0));
  3212. }
  3213. #endif
  3214. if (model.buf == nullptr) {
  3215. // CPU backend, and indirectly CUDA and OpenCL
  3216. if (ml.use_mmap) {
  3217. model.buf = ggml_backend_cpu_buffer_from_ptr(ml.mapping->addr, ml.mapping->size);
  3218. buf_mmap = model.buf;
  3219. } else {
  3220. // allocate only CPU tensors
  3221. model.buf = ggml_backend_buft_alloc_buffer(buft, buf_size);
  3222. ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(model.buf);
  3223. for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
  3224. if (t->backend == GGML_BACKEND_CPU) {
  3225. ggml_tallocr_alloc(alloc, t);
  3226. }
  3227. }
  3228. ggml_tallocr_free(alloc);
  3229. }
  3230. }
  3231. if (use_mlock && ggml_backend_buffer_is_host(model.buf)) {
  3232. model.mlock_buf.init (ggml_backend_buffer_get_base(model.buf));
  3233. model.mlock_buf.grow_to(ggml_backend_buffer_get_size(model.buf));
  3234. }
  3235. // print memory requirements
  3236. {
  3237. size_t sys_mem_required = ctx_size + buf_size;
  3238. if (sys_mem_required > 0) {
  3239. LLAMA_LOG_INFO("%s: system memory used = %7.2f MiB\n", __func__, sys_mem_required / 1024.0 / 1024.0);
  3240. }
  3241. if (vram_weights > 0) {
  3242. LLAMA_LOG_INFO("%s: VRAM used = %7.2f MiB\n", __func__, vram_weights / 1024.0 / 1024.0);
  3243. }
  3244. #if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST)
  3245. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  3246. LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  3247. if (n_gpu_layers > (int) hparams.n_layer) {
  3248. LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
  3249. }
  3250. const int max_backend_supported_layers = hparams.n_layer + 1;
  3251. const int max_offloadable_layers = hparams.n_layer + 1;
  3252. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
  3253. #endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
  3254. }
  3255. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  3256. ggml_cuda_set_tensor_split(tensor_split);
  3257. #else
  3258. GGML_UNUSED(tensor_split);
  3259. #endif // GGML_USE_CUBLAS
  3260. // populate tensors_by_name
  3261. for (int i = 0; i < ml.n_tensors; ++i) {
  3262. struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i));
  3263. model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
  3264. }
  3265. if (!ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf_mmap, use_mlock ? &model.mlock_mmap : NULL)) {
  3266. return false;
  3267. }
  3268. model.mapping = std::move(ml.mapping);
  3269. // loading time will be recalculated after the first eval, so
  3270. // we take page faults deferred by mmap() into consideration
  3271. model.t_load_us = ggml_time_us() - model.t_start_us;
  3272. return true;
  3273. }
  3274. // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
  3275. static int llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) {
  3276. try {
  3277. llama_model_loader ml(fname, params.use_mmap, params.kv_overrides);
  3278. model.hparams.vocab_only = params.vocab_only;
  3279. llm_load_arch (ml, model);
  3280. llm_load_hparams(ml, model);
  3281. llm_load_vocab (ml, model);
  3282. llm_load_print_meta(ml, model);
  3283. if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
  3284. throw std::runtime_error("vocab size mismatch");
  3285. }
  3286. if (params.vocab_only) {
  3287. LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
  3288. return 0;
  3289. }
  3290. if (!llm_load_tensors(
  3291. ml, model, params.n_gpu_layers, params.main_gpu, params.tensor_split, params.use_mlock,
  3292. params.progress_callback, params.progress_callback_user_data
  3293. )) {
  3294. return -2;
  3295. }
  3296. } catch (const std::exception & err) {
  3297. LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
  3298. return -1;
  3299. }
  3300. return 0;
  3301. }
  3302. //
  3303. // llm_build
  3304. //
  3305. using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;
  3306. enum llm_rope_type {
  3307. LLM_ROPE,
  3308. LLM_ROPE_NEOX,
  3309. LLM_ROPE_GLM,
  3310. };
  3311. enum llm_ffn_op_type {
  3312. LLM_FFN_SILU,
  3313. LLM_FFN_GELU,
  3314. LLM_FFN_RELU,
  3315. LLM_FFN_RELU_SQR,
  3316. };
  3317. enum llm_ffn_gate_type {
  3318. LLM_FFN_SEQ,
  3319. LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
  3320. };
  3321. enum llm_norm_type {
  3322. LLM_NORM,
  3323. LLM_NORM_RMS,
  3324. };
  3325. static struct ggml_tensor * llm_build_inp_embd(
  3326. struct ggml_context * ctx,
  3327. const llama_hparams & hparams,
  3328. const llama_batch & batch,
  3329. struct ggml_tensor * tok_embd,
  3330. const llm_build_cb & cb) {
  3331. const int64_t n_embd = hparams.n_embd;
  3332. struct ggml_tensor * inpL;
  3333. if (batch.token) {
  3334. struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
  3335. cb(inp_tokens, "inp_tokens", -1);
  3336. inpL = ggml_get_rows(ctx, tok_embd, inp_tokens);
  3337. } else {
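// no token ids: the batch supplies embeddings directly, so only an input tensor of shape [n_embd, n_tokens] is created here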
  3338. #ifdef GGML_USE_MPI
  3339. GGML_ASSERT(false && "not implemented");
  3340. #endif
  3341. inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
  3342. }
  3343. return inpL;
  3344. }
  3345. // Persimmon: n_rot = n_embd_head/2
  3346. // Other: n_rot = n_embd_head
  3347. static void llm_build_k_shift(
  3348. struct ggml_context * ctx,
  3349. const llama_hparams & hparams,
  3350. const llama_cparams & cparams,
  3351. const llama_kv_cache & kv,
  3352. struct ggml_cgraph * graph,
  3353. llm_rope_type type,
  3354. int64_t n_ctx,
  3355. int n_rot,
  3356. float freq_base,
  3357. float freq_scale,
  3358. const llm_build_cb & cb) {
  3359. const int64_t n_layer = hparams.n_layer;
  3360. const int64_t n_head_kv = hparams.n_head_kv;
  3361. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  3362. const int64_t n_embd_head = hparams.n_embd_head();
  3363. const int32_t n_orig_ctx = cparams.n_yarn_orig_ctx;
  3364. const float ext_factor = cparams.yarn_ext_factor;
  3365. const float attn_factor = cparams.yarn_attn_factor;
  3366. const float beta_fast = cparams.yarn_beta_fast;
  3367. const float beta_slow = cparams.yarn_beta_slow;
  3368. GGML_ASSERT(n_embd_head % n_rot == 0);
  3369. struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_ctx);
  3370. cb(K_shift, "K_shift", -1);
  3371. int rope_type = 0;
  3372. switch (type) {
  3373. case LLM_ROPE: rope_type = 0; break;
  3374. case LLM_ROPE_NEOX: rope_type = 2; break;
  3375. case LLM_ROPE_GLM: rope_type = 4; break;
  3376. }
  3377. for (int il = 0; il < n_layer; ++il) {
  3378. struct ggml_tensor * tmp =
  3379. // we rotate only the first n_rot dimensions
  3380. ggml_rope_custom_inplace(ctx,
  3381. ggml_view_3d(ctx, kv.k_l[il],
  3382. n_embd_head, n_head_kv, n_ctx,
  3383. ggml_row_size(kv.k_l[il]->type, n_embd_head),
  3384. ggml_row_size(kv.k_l[il]->type, n_embd_gqa),
  3385. 0),
  3386. K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
  3387. ext_factor, attn_factor, beta_fast, beta_slow);
  3388. cb(tmp, "K_shifted", il);
  3389. ggml_build_forward_expand(graph, tmp);
  3390. }
  3391. }
  3392. static void llm_build_kv_store(
  3393. struct ggml_context * ctx,
  3394. const llama_hparams & hparams,
  3395. const llama_kv_cache & kv,
  3396. struct ggml_cgraph * graph,
  3397. struct ggml_tensor * k_cur,
  3398. struct ggml_tensor * v_cur,
  3399. int64_t n_ctx,
  3400. int32_t n_tokens,
  3401. int32_t kv_head,
  3402. const llm_build_cb & cb,
  3403. int64_t il) {
  3404. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  3405. // compute the transposed [n_tokens, n_embd_gqa] V matrix
  3406. struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, n_embd_gqa, n_tokens));
  3407. //struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed
  3408. cb(v_cur_t, "v_cur_t", il);
  3409. struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_gqa,
  3410. (ggml_row_size(kv.k_l[il]->type, n_embd_gqa))*kv_head);
  3411. cb(k_cache_view, "k_cache_view", il);
  3412. struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_gqa,
  3413. ( n_ctx)*ggml_element_size(kv.v_l[il]),
  3414. (kv_head)*ggml_element_size(kv.v_l[il]));
  3415. cb(v_cache_view, "v_cache_view", il);
  3416. // important: storing RoPE-ed version of K in the KV cache!
  3417. ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
  3418. ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view));
  3419. }
  3420. static struct ggml_tensor * llm_build_norm(
  3421. struct ggml_context * ctx,
  3422. struct ggml_tensor * cur,
  3423. const llama_hparams & hparams,
  3424. struct ggml_tensor * mw,
  3425. struct ggml_tensor * mb,
  3426. llm_norm_type type,
  3427. const llm_build_cb & cb,
  3428. int il) {
  3429. switch (type) {
  3430. case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break;
  3431. case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
  3432. }
  3433. if (mw || mb) {
  3434. cb(cur, "norm", il);
  3435. }
  3436. if (mw) {
  3437. cur = ggml_mul(ctx, cur, mw);
  3438. if (mb) {
  3439. cb(cur, "norm_w", il);
  3440. }
  3441. }
  3442. if (mb) {
  3443. cur = ggml_add(ctx, cur, mb);
  3444. }
  3445. return cur;
  3446. }
  3447. static struct ggml_tensor * llm_build_ffn(
  3448. struct ggml_context * ctx,
  3449. struct ggml_tensor * cur,
  3450. struct ggml_tensor * up,
  3451. struct ggml_tensor * up_b,
  3452. struct ggml_tensor * gate,
  3453. struct ggml_tensor * gate_b,
  3454. struct ggml_tensor * down,
  3455. struct ggml_tensor * down_b,
  3456. struct ggml_tensor * act_scales,
  3457. llm_ffn_op_type type_op,
  3458. llm_ffn_gate_type type_gate,
  3459. const llm_build_cb & cb,
  3460. int il) {
  3461. struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur);
  3462. cb(tmp, "ffn_up", il);
  3463. if (up_b) {
  3464. tmp = ggml_add(ctx, tmp, up_b);
  3465. cb(tmp, "ffn_up_b", il);
  3466. }
  3467. if (gate) {
  3468. switch (type_gate) {
  3469. case LLM_FFN_SEQ:
  3470. {
  3471. cur = ggml_mul_mat(ctx, gate, tmp);
  3472. cb(cur, "ffn_gate", il);
  3473. } break;
  3474. case LLM_FFN_PAR:
  3475. {
  3476. cur = ggml_mul_mat(ctx, gate, cur);
  3477. cb(cur, "ffn_gate", il);
  3478. } break;
  3479. }
  3480. if (gate_b) {
  3481. cur = ggml_add(ctx, cur, gate_b);
  3482. cb(cur, "ffn_gate_b", il);
  3483. }
  3484. } else {
  3485. cur = tmp;
  3486. }
  3487. switch (type_op) {
  3488. case LLM_FFN_SILU:
  3489. {
  3490. cur = ggml_silu(ctx, cur);
  3491. cb(cur, "ffn_silu", il);
  3492. } break;
  3493. case LLM_FFN_GELU:
  3494. {
  3495. cur = ggml_gelu(ctx, cur);
  3496. cb(cur, "ffn_gelu", il);
  3497. if (act_scales != NULL) {
  3498. cur = ggml_div(ctx, cur, act_scales);
  3499. cb(cur, "ffn_act", il);
  3500. }
  3501. } break;
  3502. case LLM_FFN_RELU:
  3503. {
  3504. cur = ggml_relu(ctx, cur);
  3505. cb(cur, "ffn_relu", il);
  3506. } break;
  3507. case LLM_FFN_RELU_SQR:
  3508. {
  3509. cur = ggml_relu(ctx, cur);
  3510. cb(cur, "ffn_relu", il);
  3511. cur = ggml_sqr(ctx, cur);
  3512. cb(cur, "ffn_sqr(relu)", il);
  3513. } break;
  3514. }
  3515. if (type_gate == LLM_FFN_PAR) {
  3516. cur = ggml_mul(ctx, cur, tmp);
  3517. cb(cur, "ffn_gate_par", il);
  3518. }
  3519. cur = ggml_mul_mat(ctx, down, cur);
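// tag the pre-bias result only when a bias add follows; the final FFN output is named by the caller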
  3520. if (down_b) {
  3521. cb(cur, "ffn_down", il);
  3522. }
  3523. if (down_b) {
  3524. cur = ggml_add(ctx, cur, down_b);
  3525. }
  3526. return cur;
  3527. }
  3528. // if max_alibi_bias > 0 then apply ALiBi
  3529. static struct ggml_tensor * llm_build_kqv(
  3530. struct ggml_context * ctx,
  3531. const llama_model & model,
  3532. const llama_hparams & hparams,
  3533. const llama_kv_cache & kv,
  3534. struct ggml_tensor * wo,
  3535. struct ggml_tensor * wo_b,
  3536. struct ggml_tensor * q_cur,
  3537. struct ggml_tensor * kq_mask,
  3538. int64_t n_ctx,
  3539. int32_t n_tokens,
  3540. int32_t n_kv,
  3541. float max_alibi_bias,
  3542. float kq_scale,
  3543. const llm_build_cb & cb,
  3544. int il) {
  3545. const int64_t n_embd = hparams.n_embd;
  3546. const int64_t n_head = hparams.n_head;
  3547. const int64_t n_head_kv = hparams.n_head_kv;
  3548. const int64_t n_embd_head = hparams.n_embd_head();
  3549. const int64_t n_embd_gqa = hparams.n_embd_gqa();
  3550. struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
  3551. cb(q, "q", il);
  3552. struct ggml_tensor * k =
  3553. ggml_view_3d(ctx, kv.k_l[il],
  3554. n_embd_head, n_kv, n_head_kv,
  3555. ggml_row_size(kv.k_l[il]->type, n_embd_gqa),
  3556. ggml_row_size(kv.k_l[il]->type, n_embd_head),
  3557. 0);
  3558. cb(k, "k", il);
  3559. struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
  3560. cb(kq, "kq", il);
  3561. if (model.arch == LLM_ARCH_PHI2) {
  3562. // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
  3563. // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
  3564. ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
  3565. }
  3566. if (max_alibi_bias > 0.0f) {
  3567. // temporary branch until we figure out how to handle ggml_alibi through ggml_add
  3568. kq = ggml_scale(ctx, kq, kq_scale);
  3569. cb(kq, "kq_scaled", il);
  3570. if (max_alibi_bias > 0.0f) {
  3571. // TODO: n_head or n_head_kv
  3572. // TODO: K-shift is likely not working
  3573. // TODO: change to ggml_add
  3574. kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, max_alibi_bias);
  3575. cb(kq, "kq_scaled_alibi", il);
  3576. }
  3577. kq = ggml_add(ctx, kq, kq_mask);
  3578. cb(kq, "kq_masked", il);
  3579. kq = ggml_soft_max(ctx, kq);
  3580. cb(kq, "kq_soft_max", il);
  3581. } else {
  3582. kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale);
  3583. cb(kq, "kq_soft_max_ext", il);
  3584. }
  3585. // split cached V into n_head_kv heads
  3586. struct ggml_tensor * v =
  3587. ggml_view_3d(ctx, kv.v_l[il],
  3588. n_kv, n_embd_head, n_head_kv,
  3589. ggml_element_size(kv.v_l[il])*n_ctx,
  3590. ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head,
  3591. 0);
  3592. cb(v, "v", il);
  3593. struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
  3594. cb(kqv, "kqv", il);
  3595. struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
  3596. cb(kqv_merged, "kqv_merged", il);
  3597. struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, n_embd, n_tokens);
  3598. cb(cur, "kqv_merged_cont", il);
  3599. cur = ggml_mul_mat(ctx, wo, cur);
  3600. if (wo_b) {
  3601. cb(cur, "kqv_wo", il);
  3602. }
  3603. if (wo_b) {
  3604. cur = ggml_add(ctx, cur, wo_b);
  3605. }
  3606. return cur;
  3607. }
  3608. struct llm_build_context {
  3609. const llama_model & model;
  3610. const llama_hparams & hparams;
  3611. const llama_cparams & cparams;
  3612. const llama_batch & batch;
  3613. const llama_kv_cache & kv_self;
  3614. const int64_t n_embd;
  3615. const int64_t n_layer;
  3616. const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
  3617. const int64_t n_head;
  3618. const int64_t n_head_kv;
  3619. const int64_t n_embd_head;
  3620. const int64_t n_embd_gqa;
  3621. const int64_t n_expert;
  3622. const int64_t n_expert_used;
  3623. const float freq_base;
  3624. const float freq_scale;
  3625. const float ext_factor;
  3626. const float attn_factor;
  3627. const float beta_fast;
  3628. const float beta_slow;
  3629. const float norm_eps;
  3630. const float norm_rms_eps;
  3631. const int32_t n_tokens;
  3632. const int32_t n_kv; // size of KV cache to consider (n_kv <= n_ctx)
  3633. const int32_t kv_head; // index of where we store new KV data in the cache
  3634. const int32_t n_orig_ctx;
  3635. const bool do_rope_shift;
  3636. const llm_build_cb & cb;
  3637. std::vector<uint8_t> & buf_compute_meta;
  3638. struct ggml_context * ctx0 = nullptr;
  3639. // TODO: consider making the entire interface noexcept
  3640. llm_build_context(
  3641. llama_context & lctx,
  3642. const llama_batch & batch,
  3643. const llm_build_cb & cb,
  3644. bool worst_case) :
  3645. model (lctx.model),
  3646. hparams (model.hparams),
  3647. cparams (lctx.cparams),
  3648. batch (batch),
  3649. kv_self (lctx.kv_self),
  3650. n_embd (hparams.n_embd),
  3651. n_layer (hparams.n_layer),
  3652. n_ctx (cparams.n_ctx),
  3653. n_head (hparams.n_head),
  3654. n_head_kv (hparams.n_head_kv),
  3655. n_embd_head (hparams.n_embd_head()),
  3656. n_embd_gqa (hparams.n_embd_gqa()),
  3657. n_expert (hparams.n_expert),
  3658. n_expert_used (hparams.n_expert_used),
  3659. freq_base (cparams.rope_freq_base),
  3660. freq_scale (cparams.rope_freq_scale),
  3661. ext_factor (cparams.yarn_ext_factor),
  3662. attn_factor (cparams.yarn_attn_factor),
  3663. beta_fast (cparams.yarn_beta_fast),
  3664. beta_slow (cparams.yarn_beta_slow),
  3665. norm_eps (hparams.f_norm_eps),
  3666. norm_rms_eps (hparams.f_norm_rms_eps),
  3667. n_tokens (batch.n_tokens),
  3668. n_kv (worst_case ? n_ctx : kv_self.n),
  3669. kv_head (worst_case ? n_ctx - n_tokens : kv_self.head),
  3670. n_orig_ctx (cparams.n_yarn_orig_ctx),
  3671. do_rope_shift (worst_case || kv_self.has_shift),
  3672. cb (cb),
  3673. buf_compute_meta (lctx.buf_compute_meta) {
  3674. GGML_ASSERT(!!kv_self.ctx);
  3675. // all initializations should be done in init()
  3676. }
  3677. void init() {
  3678. struct ggml_init_params params = {
  3679. /*.mem_size =*/ buf_compute_meta.size(),
  3680. /*.mem_buffer =*/ buf_compute_meta.data(),
  3681. /*.no_alloc =*/ true,
  3682. };
  3683. ctx0 = ggml_init(params);
  3684. }
  3685. void free() {
  3686. if (ctx0) {
  3687. ggml_free(ctx0);
  3688. ctx0 = nullptr;
  3689. }
  3690. }
  3691. struct ggml_cgraph * build_llama() {
  3692. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3693. GGML_ASSERT(n_embd_head == hparams.n_rot);
  3694. struct ggml_tensor * cur;
  3695. struct ggml_tensor * inpL;
  3696. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3697. cb(inpL, "inp_embd", -1);
  3698. // inp_pos - contains the positions
  3699. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3700. cb(inp_pos, "inp_pos", -1);
  3701. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3702. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3703. cb(KQ_mask, "KQ_mask", -1);
  3704. // shift the entire K-cache if needed
  3705. if (do_rope_shift) {
  3706. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3707. }
  3708. for (int il = 0; il < n_layer; ++il) {
  3709. struct ggml_tensor * inpSA = inpL;
  3710. // norm
  3711. cur = llm_build_norm(ctx0, inpL, hparams,
  3712. model.layers[il].attn_norm, NULL,
  3713. LLM_NORM_RMS, cb, il);
  3714. cb(cur, "attn_norm", il);
  3715. // self-attention
  3716. {
  3717. // compute Q and K and RoPE them
  3718. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  3719. cb(Qcur, "Qcur", il);
  3720. if (model.layers[il].bq) {
  3721. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  3722. cb(Qcur, "Qcur", il);
  3723. }
  3724. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  3725. cb(Kcur, "Kcur", il);
  3726. if (model.layers[il].bk) {
  3727. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  3728. cb(Kcur, "Kcur", il);
  3729. }
  3730. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  3731. cb(Vcur, "Vcur", il);
  3732. if (model.layers[il].bv) {
  3733. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  3734. cb(Vcur, "Vcur", il);
  3735. }
  3736. Qcur = ggml_rope_custom(
  3737. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  3738. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3739. ext_factor, attn_factor, beta_fast, beta_slow
  3740. );
  3741. cb(Qcur, "Qcur", il);
  3742. Kcur = ggml_rope_custom(
  3743. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  3744. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3745. ext_factor, attn_factor, beta_fast, beta_slow
  3746. );
  3747. cb(Kcur, "Kcur", il);
  3748. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3749. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  3750. model.layers[il].wo, model.layers[il].bo,
  3751. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  3752. cb(cur, "kqv_out", il);
  3753. }
  3754. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3755. cb(ffn_inp, "ffn_inp", il);
  3756. // feed-forward network
  3757. if (model.layers[il].ffn_gate_inp == nullptr) {
  3758. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3759. model.layers[il].ffn_norm, NULL,
  3760. LLM_NORM_RMS, cb, il);
  3761. cb(cur, "ffn_norm", il);
  3762. cur = llm_build_ffn(ctx0, cur,
  3763. model.layers[il].ffn_up, NULL,
  3764. model.layers[il].ffn_gate, NULL,
  3765. model.layers[il].ffn_down, NULL,
  3766. NULL,
  3767. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  3768. cb(cur, "ffn_out", il);
  3769. } else {
  3770. // MoE branch
  3771. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3772. model.layers[il].ffn_norm, NULL,
  3773. LLM_NORM_RMS, cb, il);
  3774. cb(cur, "ffn_norm", il);
  3775. ggml_tensor * logits = ggml_mul_mat(ctx0, model.layers[il].ffn_gate_inp, cur); // [n_tokens, num_experts]
  3776. cb(logits, "ffn_moe_logits", il);
  3777. ggml_tensor * probs = ggml_soft_max(ctx0, logits); // [n_tokens, num_experts]
  3778. cb(probs, "ffn_moe_probs", il);
  3779. // select experts
  3780. ggml_tensor * selected_experts = ggml_top_k(ctx0, probs, n_expert_used); // [n_tokens, num_experts_per_tok]
  3781. cb(selected_experts->src[0], "ffn_moe_argsort", il);
  3782. ggml_tensor * weights = ggml_get_rows(ctx0,
  3783. ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts);
  3784. cb(weights, "ffn_moe_weights", il);
  3785. weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); // [n_tokens, num_experts_per_tok]
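// renormalize so that the selected experts' weights sum to 1 for each token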
  3786. ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights);
  3787. cb(weights_sum, "ffn_moe_weights_sum", il);
  3788. weights = ggml_div(ctx0, weights, weights_sum); // [n_tokens, num_experts_per_tok]
  3789. cb(weights, "ffn_moe_weights_norm", il);
  3790. // compute expert outputs
  3791. ggml_tensor * moe_out = nullptr;
  3792. for (int i = 0; i < n_expert_used; ++i) {
  3793. ggml_tensor * cur_expert;
  3794. ggml_tensor * cur_up = ggml_mul_mat_id(ctx0, model.layers[il].ffn_up_exp, n_expert, selected_experts, i, cur);
  3795. cb(cur_up, "ffn_moe_up", il);
  3796. ggml_tensor * cur_gate = ggml_mul_mat_id(ctx0, model.layers[il].ffn_gate_exp, n_expert, selected_experts, i, cur);
  3797. cb(cur_gate, "ffn_moe_gate", il);
  3798. cur_gate = ggml_silu(ctx0, cur_gate);
  3799. cb(cur_gate, "ffn_moe_silu", il);
  3800. cur_expert = ggml_mul(ctx0, cur_up, cur_gate); // [n_tokens, n_embd]
  3801. cb(cur_expert, "ffn_moe_gate_par", il);
  3802. cur_expert = ggml_mul_mat_id(ctx0, model.layers[il].ffn_down_exp, n_expert, selected_experts, i, cur_expert); // [n_tokens, n_embd]
  3803. cb(cur_expert, "ffn_moe_down", il);
  3804. cur_expert = ggml_mul(ctx0, cur_expert,
  3805. ggml_view_2d(ctx0, weights, 1, n_tokens, weights->nb[1], i*weights->nb[0]));
  3806. cb(cur_expert, "ffn_moe_weighted", il);
  3807. if (i == 0) {
  3808. moe_out = cur_expert;
  3809. } else {
  3810. moe_out = ggml_add(ctx0, moe_out, cur_expert);
  3811. cb(moe_out, "ffn_moe_out", il);
  3812. }
  3813. }
  3814. cur = moe_out;
  3815. }
  3816. cur = ggml_add(ctx0, cur, ffn_inp);
  3817. cb(cur, "l_out", il);
  3818. // input for next layer
  3819. inpL = cur;
  3820. }
  3821. cur = inpL;
  3822. cur = llm_build_norm(ctx0, cur, hparams,
  3823. model.output_norm, NULL,
  3824. LLM_NORM_RMS, cb, -1);
  3825. cb(cur, "result_norm", -1);
  3826. // lm_head
  3827. cur = ggml_mul_mat(ctx0, model.output, cur);
  3828. cb(cur, "result_output", -1);
  3829. ggml_build_forward_expand(gf, cur);
  3830. return gf;
  3831. }
  3832. struct ggml_cgraph * build_baichuan() {
  3833. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3834. struct ggml_tensor * cur;
  3835. struct ggml_tensor * inpL;
  3836. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3837. cb(inpL, "inp_embd", -1);
  3838. // inp_pos - contains the positions
  3839. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3840. cb(inp_pos, "inp_pos", -1);
  3841. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3842. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3843. cb(KQ_mask, "KQ_mask", -1);
  3844. // shift the entire K-cache if needed
  3845. if (do_rope_shift) {
  3846. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3847. }
  3848. for (int il = 0; il < n_layer; ++il) {
  3849. struct ggml_tensor * inpSA = inpL;
  3850. cur = llm_build_norm(ctx0, inpL, hparams,
  3851. model.layers[il].attn_norm, NULL,
  3852. LLM_NORM_RMS, cb, il);
  3853. cb(cur, "attn_norm", il);
  3854. // self-attention
  3855. {
  3856. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  3857. cb(Qcur, "Qcur", il);
  3858. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  3859. cb(Kcur, "Kcur", il);
  3860. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  3861. cb(Vcur, "Vcur", il);
  3862. switch (model.type) {
  3863. case MODEL_7B:
  3864. Qcur = ggml_rope_custom(
  3865. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  3866. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3867. ext_factor, attn_factor, beta_fast, beta_slow
  3868. );
  3869. Kcur = ggml_rope_custom(
  3870. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  3871. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3872. ext_factor, attn_factor, beta_fast, beta_slow
  3873. );
  3874. break;
  3875. case MODEL_13B:
  3876. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
  3877. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
  3878. break;
  3879. default:
  3880. GGML_ASSERT(false);
  3881. }
  3882. cb(Qcur, "Qcur", il);
  3883. cb(Kcur, "Kcur", il);
  3884. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3885. // apply ALiBi for the 13B model
  3886. const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f;
  3887. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  3888. model.layers[il].wo, NULL,
  3889. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  3890. cb(cur, "kqv_out", il);
  3891. }
  3892. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3893. cb(ffn_inp, "ffn_inp", il);
  3894. // feed-forward network
  3895. {
  3896. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3897. model.layers[il].ffn_norm, NULL,
  3898. LLM_NORM_RMS, cb, il);
  3899. cb(cur, "ffn_norm", il);
  3900. cur = llm_build_ffn(ctx0, cur,
  3901. model.layers[il].ffn_up, NULL,
  3902. model.layers[il].ffn_gate, NULL,
  3903. model.layers[il].ffn_down, NULL,
  3904. NULL,
  3905. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  3906. cb(cur, "ffn_out", il);
  3907. }
  3908. cur = ggml_add(ctx0, cur, ffn_inp);
  3909. cb(cur, "l_out", il);
  3910. // input for next layer
  3911. inpL = cur;
  3912. }
  3913. cur = inpL;
  3914. cur = llm_build_norm(ctx0, cur, hparams,
  3915. model.output_norm, NULL,
  3916. LLM_NORM_RMS, cb, -1);
  3917. cb(cur, "result_norm", -1);
  3918. // lm_head
  3919. cur = ggml_mul_mat(ctx0, model.output, cur);
  3920. cb(cur, "result_output", -1);
  3921. ggml_build_forward_expand(gf, cur);
  3922. return gf;
  3923. }
  3924. struct ggml_cgraph * build_falcon() {
  3925. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3926. struct ggml_tensor * cur;
  3927. struct ggml_tensor * inpL;
  3928. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3929. cb(inpL, "inp_embd", -1);
  3930. // inp_pos - contains the positions
  3931. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3932. cb(inp_pos, "inp_pos", -1);
  3933. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3934. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3935. cb(KQ_mask, "KQ_mask", -1);
  3936. // shift the entire K-cache if needed
  3937. if (do_rope_shift) {
  3938. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3939. }
  3940. for (int il = 0; il < n_layer; ++il) {
  3941. struct ggml_tensor * attn_norm;
  3942. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  3943. model.layers[il].attn_norm,
  3944. model.layers[il].attn_norm_b,
  3945. LLM_NORM, cb, il);
  3946. cb(attn_norm, "attn_norm", il);
  3947. // self-attention
  3948. {
  3949. if (model.layers[il].attn_norm_2) {
  3950. // Falcon-40B
  3951. cur = llm_build_norm(ctx0, inpL, hparams,
  3952. model.layers[il].attn_norm_2,
  3953. model.layers[il].attn_norm_2_b,
  3954. LLM_NORM, cb, il);
  3955. cb(cur, "attn_norm_2", il);
  3956. } else {
  3957. cur = attn_norm;
  3958. }
  3959. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  3960. cb(cur, "wqkv", il);
  3961. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  3962. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  3963. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  3964. cb(Qcur, "Qcur", il);
  3965. cb(Kcur, "Kcur", il);
  3966. cb(Vcur, "Vcur", il);
  3967. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  3968. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  3969. // rope mode 2 selects NeoX-style rotation
  3970. Qcur = ggml_rope_custom(
  3971. ctx0, Qcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  3972. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  3973. );
  3974. cb(Qcur, "Qcur", il);
  3975. Kcur = ggml_rope_custom(
  3976. ctx0, Kcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  3977. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  3978. );
  3979. cb(Kcur, "Kcur", il);
  3980. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3981. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  3982. model.layers[il].wo, NULL,
  3983. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  3984. cb(cur, "kqv_out", il);
  3985. }
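// Falcon uses parallel attention/FFN: the FFN below reads the same attn_norm output, and both results are added to the layer input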
  3986. struct ggml_tensor * ffn_inp = cur;
  3987. // feed forward
  3988. {
  3989. cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result
  3990. model.layers[il].ffn_up, NULL,
  3991. NULL, NULL,
  3992. model.layers[il].ffn_down, NULL,
  3993. NULL,
  3994. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  3995. cb(cur, "ffn_out", il);
  3996. }
  3997. cur = ggml_add(ctx0, cur, ffn_inp);
  3998. cb(cur, "l_out", il);
  3999. cur = ggml_add(ctx0, cur, inpL);
  4000. cb(cur, "l_out", il);
  4001. // input for next layer
  4002. inpL = cur;
  4003. }
  4004. cur = inpL;
  4005. // norm
  4006. cur = llm_build_norm(ctx0, cur, hparams,
  4007. model.output_norm,
  4008. model.output_norm_b,
  4009. LLM_NORM, cb, -1);
  4010. cb(cur, "result_norm", -1);
  4011. cur = ggml_mul_mat(ctx0, model.output, cur);
  4012. cb(cur, "result_output", -1);
  4013. ggml_build_forward_expand(gf, cur);
  4014. return gf;
  4015. }
  4016. struct ggml_cgraph * build_starcoder() {
  4017. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4018. struct ggml_tensor * cur;
  4019. struct ggml_tensor * pos;
  4020. struct ggml_tensor * inpL;
  4021. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4022. cb(inpL, "inp_embd", -1);
  4023. // inp_pos - contains the positions
  4024. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4025. cb(inp_pos, "inp_pos", -1);
  4026. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4027. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4028. cb(KQ_mask, "KQ_mask", -1);
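// learned absolute position embeddings (GPT-2 style) are added to the token embeddings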
  4029. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  4030. cb(pos, "pos_embd", -1);
  4031. inpL = ggml_add(ctx0, inpL, pos);
  4032. cb(inpL, "inpL", -1);
  4033. for (int il = 0; il < n_layer; ++il) {
  4034. cur = llm_build_norm(ctx0, inpL, hparams,
  4035. model.layers[il].attn_norm,
  4036. model.layers[il].attn_norm_b,
  4037. LLM_NORM, cb, il);
  4038. cb(cur, "attn_norm", il);
  4039. // self-attention
  4040. {
  4041. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4042. cb(cur, "wqkv", il);
  4043. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4044. cb(cur, "bqkv", il);
  4045. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4046. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4047. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4048. cb(Qcur, "Qcur", il);
  4049. cb(Kcur, "Kcur", il);
  4050. cb(Vcur, "Vcur", il);
  4051. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4052. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4053. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4054. model.layers[il].wo, model.layers[il].bo,
  4055. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4056. cb(cur, "kqv_out", il);
  4057. }
  4058. // add the input
  4059. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4060. cb(ffn_inp, "ffn_inp", il);
  4061. // FF
  4062. {
  4063. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4064. model.layers[il].ffn_norm,
  4065. model.layers[il].ffn_norm_b,
  4066. LLM_NORM, cb, il);
  4067. cb(cur, "ffn_norm", il);
  4068. cur = llm_build_ffn(ctx0, cur,
  4069. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4070. NULL, NULL,
  4071. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4072. NULL,
  4073. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4074. cb(cur, "ffn_out", il);
  4075. }
  4076. inpL = ggml_add(ctx0, cur, ffn_inp);
  4077. cb(inpL, "l_out", il);
  4078. }
  4079. cur = llm_build_norm(ctx0, inpL, hparams,
  4080. model.output_norm,
  4081. model.output_norm_b,
  4082. LLM_NORM, cb, -1);
  4083. cb(cur, "result_norm", -1);
  4084. cur = ggml_mul_mat(ctx0, model.output, cur);
  4085. cb(cur, "result_output", -1);
  4086. ggml_build_forward_expand(gf, cur);
  4087. return gf;
  4088. }
  4089. struct ggml_cgraph * build_persimmon() {
  4090. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4091. const int64_t n_rot = n_embd_head / 2;
  4092. struct ggml_tensor * cur;
  4093. struct ggml_tensor * inpL;
  4094. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4095. cb(inpL, "imp_embd", -1);
  4096. // inp_pos - contains the positions
  4097. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4098. cb(inp_pos, "inp_pos", -1);
  4099. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4100. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4101. cb(KQ_mask, "KQ_mask", -1);
  4102. if (do_rope_shift) {
  4103. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4104. }
  4105. for (int il = 0; il < n_layer; ++il) {
  4106. struct ggml_tensor * residual = inpL;
  4107. cur = llm_build_norm(ctx0, inpL, hparams,
  4108. model.layers[il].attn_norm,
  4109. model.layers[il].attn_norm_b,
  4110. LLM_NORM, cb, il);
  4111. cb(cur, "attn_norm", il);
  4112. // self attention
  4113. {
  4114. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4115. cb(cur, "wqkv", il);
  4116. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4117. cb(cur, "bqkv", il);
  4118. // split qkv
  4119. GGML_ASSERT(n_head_kv == n_head);
  4120. struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens);
  4121. cb(tmpqkv, "tmpqkv", il);
  4122. struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2));
  4123. cb(tmpqkv_perm, "tmpqkv", il);
  4124. struct ggml_tensor * tmpq = ggml_view_3d(
  4125. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4126. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4127. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4128. 0
  4129. );
  4130. cb(tmpq, "tmpq", il);
  4131. struct ggml_tensor * tmpk = ggml_view_3d(
  4132. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4133. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4134. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4135. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens
  4136. );
  4137. cb(tmpk, "tmpk", il);
  4138. // Q/K Layernorm
  4139. tmpq = llm_build_norm(ctx0, tmpq, hparams,
  4140. model.layers[il].attn_q_norm,
  4141. model.layers[il].attn_q_norm_b,
  4142. LLM_NORM, cb, il);
  4143. cb(tmpq, "tmpq", il);
  4144. tmpk = llm_build_norm(ctx0, tmpk, hparams,
  4145. model.layers[il].attn_k_norm,
  4146. model.layers[il].attn_k_norm_b,
  4147. LLM_NORM, cb, il);
  4148. cb(tmpk, "tmpk", il);
  4149. // RoPE the first n_rot dimensions of Q/K, pass the remaining half through unchanged, then concat.
  4150. struct ggml_tensor * qrot = ggml_view_3d(
  4151. ctx0, tmpq, n_rot, n_head, n_tokens,
  4152. ggml_element_size(tmpq) * n_embd_head,
  4153. ggml_element_size(tmpq) * n_embd_head * n_head,
  4154. 0
  4155. );
  4156. cb(qrot, "qrot", il);
  4157. struct ggml_tensor * krot = ggml_view_3d(
  4158. ctx0, tmpk, n_rot, n_head, n_tokens,
  4159. ggml_element_size(tmpk) * n_embd_head,
  4160. ggml_element_size(tmpk) * n_embd_head * n_head,
  4161. 0
  4162. );
  4163. cb(krot, "krot", il);
  4164. // get the second half of tmpq, i.e. tmpq[n_rot:, :, :]
  4165. struct ggml_tensor * qpass = ggml_view_3d(
  4166. ctx0, tmpq, n_rot, n_head, n_tokens,
  4167. ggml_element_size(tmpq) * n_embd_head,
  4168. ggml_element_size(tmpq) * n_embd_head * n_head,
  4169. ggml_element_size(tmpq) * n_rot
  4170. );
  4171. cb(qpass, "qpass", il);
  4172. struct ggml_tensor * kpass = ggml_view_3d(
  4173. ctx0, tmpk, n_rot, n_head, n_tokens,
  4174. ggml_element_size(tmpk) * n_embd_head,
  4175. ggml_element_size(tmpk) * n_embd_head * n_head,
  4176. ggml_element_size(tmpk) * n_rot
  4177. );
  4178. cb(kpass, "kpass", il);
  4179. struct ggml_tensor * qrotated = ggml_rope_custom(
  4180. ctx0, qrot, inp_pos, n_rot, 2, 0, n_orig_ctx,
  4181. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4182. );
  4183. cb(qrotated, "qrotated", il);
  4184. struct ggml_tensor * krotated = ggml_rope_custom(
  4185. ctx0, krot, inp_pos, n_rot, 2, 0, n_orig_ctx,
  4186. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4187. );
  4188. cb(krotated, "krotated", il);
  4189. // ggml currently only supports concatenation on dim=2
  4190. // so we need to permute qrot, qpass, concat, then permute back.
  4191. qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3));
  4192. cb(qrotated, "qrotated", il);
  4193. krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3));
  4194. cb(krotated, "krotated", il);
  4195. qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3));
  4196. cb(qpass, "qpass", il);
  4197. kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3));
  4198. cb(kpass, "kpass", il);
  4199. struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass);
  4200. cb(Qcur, "Qcur", il);
  4201. struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
  4202. cb(Kcur, "Kcur", il);
  4203. struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3));
  4204. cb(Q, "Q", il);
  4205. Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
  4206. cb(Kcur, "Kcur", il);
  4207. struct ggml_tensor * Vcur = ggml_view_3d(
  4208. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4209. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4210. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4211. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2
  4212. );
  4213. cb(Vcur, "Vcur", il);
  4214. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4215. // TODO: not tested, could be broken
  4216. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4217. model.layers[il].wo, model.layers[il].bo,
  4218. Q, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4219. cb(cur, "kqv_out", il);
  4220. }
  4221. struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
  4222. cb(ffn_inp, "ffn_inp", il);
  4223. // feed-forward network
  4224. {
  4225. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4226. model.layers[il].ffn_norm,
  4227. model.layers[il].ffn_norm_b,
  4228. LLM_NORM, cb, il);
  4229. cb(cur, "ffn_norm", il);
  4230. cur = llm_build_ffn(ctx0, cur,
  4231. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4232. NULL, NULL,
  4233. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4234. NULL,
  4235. LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);
  4236. cb(cur, "ffn_out", il);
  4237. }
  4238. cur = ggml_add(ctx0, cur, ffn_inp);
  4239. cb(cur, "l_out", il);
  4240. inpL = cur;
  4241. }
  4242. cur = inpL;
  4243. cur = llm_build_norm(ctx0, cur, hparams,
  4244. model.output_norm,
  4245. model.output_norm_b,
  4246. LLM_NORM, cb, -1);
  4247. cb(cur, "result_norm", -1);
  4248. cur = ggml_mul_mat(ctx0, model.output, cur);
  4249. cb(cur, "result_output", -1);
  4250. ggml_build_forward_expand(gf, cur);
  4251. return gf;
  4252. }
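// Refact: RMS-norm without bias tensors, no RoPE (the 8.0f passed to llm_build_kqv
// below fills the same slot MPT fills with f_max_alibi_bias, i.e. an ALiBi-style
// positional bias), and a parallel SiLU-gated FFN.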
  4253. struct ggml_cgraph * build_refact() {
  4254. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4255. struct ggml_tensor * cur;
  4256. struct ggml_tensor * inpL;
  4257. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4258. cb(inpL, "inp_embd", -1);
  4259. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4260. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4261. cb(KQ_mask, "KQ_mask", -1);
  4262. for (int il = 0; il < n_layer; ++il) {
  4263. struct ggml_tensor * inpSA = inpL;
  4264. cur = llm_build_norm(ctx0, inpL, hparams,
  4265. model.layers[il].attn_norm, NULL,
  4266. LLM_NORM_RMS, cb, il);
  4267. cb(cur, "attn_norm", il);
  4268. // self-attention
  4269. {
  4270. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4271. cb(Qcur, "Qcur", il);
  4272. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4273. cb(Kcur, "Kcur", il);
  4274. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4275. cb(Vcur, "Vcur", il);
  4276. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4277. cb(Kcur, "Kcur", il);
  4278. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4279. cb(Qcur, "Qcur", il);
  4280. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4281. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4282. model.layers[il].wo, NULL,
  4283. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4284. cb(cur, "kqv_out", il);
  4285. }
  4286. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4287. cb(ffn_inp, "ffn_inp", il);
  4288. // feed-forward network
  4289. {
  4290. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4291. model.layers[il].ffn_norm, NULL,
  4292. LLM_NORM_RMS, cb, il);
  4293. cb(cur, "ffn_norm", il);
  4294. cur = llm_build_ffn(ctx0, cur,
  4295. model.layers[il].ffn_up, NULL,
  4296. model.layers[il].ffn_gate, NULL,
  4297. model.layers[il].ffn_down, NULL,
  4298. NULL,
  4299. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4300. cb(cur, "ffn_out", il);
  4301. }
  4302. cur = ggml_add(ctx0, cur, ffn_inp);
  4303. cb(cur, "l_out", il);
  4304. // input for next layer
  4305. inpL = cur;
  4306. }
  4307. cur = inpL;
  4308. cur = llm_build_norm(ctx0, cur, hparams,
  4309. model.output_norm, NULL,
  4310. LLM_NORM_RMS, cb, -1);
  4311. cb(cur, "result_norm", -1);
  4312. // lm_head
  4313. cur = ggml_mul_mat(ctx0, model.output, cur);
  4314. cb(cur, "result_output", -1);
  4315. ggml_build_forward_expand(gf, cur);
  4316. return gf;
  4317. }
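// BLOOM: the token embeddings are layer-normed once (tok_norm/tok_norm_b) before the
// layer loop, attention uses a fused wqkv projection with bias plus the same 8.0f
// ALiBi-style bias slot as Refact/MPT, and the FFN is a sequential GELU MLP with biases.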
  4318. struct ggml_cgraph * build_bloom() {
  4319. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4320. struct ggml_tensor * cur;
  4321. struct ggml_tensor * inpL;
  4322. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4323. cb(inpL, "inp_embd", -1);
  4324. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4325. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4326. cb(KQ_mask, "KQ_mask", -1);
  4327. inpL = llm_build_norm(ctx0, inpL, hparams,
  4328. model.tok_norm,
  4329. model.tok_norm_b,
  4330. LLM_NORM, cb, -1);
  4331. cb(inpL, "inp_norm", -1);
  4332. for (int il = 0; il < n_layer; ++il) {
  4333. cur = llm_build_norm(ctx0, inpL, hparams,
  4334. model.layers[il].attn_norm,
  4335. model.layers[il].attn_norm_b,
  4336. LLM_NORM, cb, il);
  4337. cb(cur, "attn_norm", il);
  4338. // self-attention
  4339. {
  4340. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4341. cb(cur, "wqkv", il);
  4342. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4343. cb(cur, "bqkv", il);
  4344. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4345. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4346. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4347. cb(Qcur, "Qcur", il);
  4348. cb(Kcur, "Kcur", il);
  4349. cb(Vcur, "Vcur", il);
  4350. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4351. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4352. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4353. model.layers[il].wo, model.layers[il].bo,
  4354. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4355. cb(cur, "kqv_out", il);
  4356. }
  4357. // Add the input
  4358. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4359. cb(ffn_inp, "ffn_inp", il);
  4360. // FF
  4361. {
  4362. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4363. model.layers[il].ffn_norm,
  4364. model.layers[il].ffn_norm_b,
  4365. LLM_NORM, cb, il);
  4366. cb(cur, "ffn_norm", il);
  4367. cur = llm_build_ffn(ctx0, cur,
  4368. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4369. NULL, NULL,
  4370. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4371. NULL,
  4372. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4373. cb(cur, "ffn_out", il);
  4374. }
  4375. inpL = ggml_add(ctx0, cur, ffn_inp);
  4376. cb(inpL, "l_out", il);
  4377. }
  4378. cur = llm_build_norm(ctx0, inpL, hparams,
  4379. model.output_norm,
  4380. model.output_norm_b,
  4381. LLM_NORM, cb, -1);
  4382. cb(cur, "result_norm", -1);
  4383. cur = ggml_mul_mat(ctx0, model.output, cur);
  4384. cb(cur, "result_output", -1);
  4385. ggml_build_forward_expand(gf, cur);
  4386. return gf;
  4387. }
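// MPT: norms carry no bias tensors, the fused QKV output is optionally clamped to
// [-f_clamp_kqv, f_clamp_kqv], the attention bias comes from hparams.f_max_alibi_bias,
// and llm_build_ffn additionally receives model.layers[il].ffn_act.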
  4388. struct ggml_cgraph * build_mpt() {
  4389. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4390. struct ggml_tensor * cur;
  4391. struct ggml_tensor * inpL;
  4392. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4393. cb(inpL, "inp_embd", -1);
  4394. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4395. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4396. cb(KQ_mask, "KQ_mask", -1);
  4397. for (int il = 0; il < n_layer; ++il) {
  4398. struct ggml_tensor * attn_norm;
  4399. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  4400. model.layers[il].attn_norm,
  4401. NULL,
  4402. LLM_NORM, cb, il);
  4403. cb(attn_norm, "attn_norm", il);
  4404. // self-attention
  4405. {
  4406. cur = attn_norm;
  4407. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4408. cb(cur, "wqkv", il);
  4409. if (hparams.f_clamp_kqv > 0.0f) {
  4410. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  4411. cb(cur, "wqkv_clamped", il);
  4412. }
  4413. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4414. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4415. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4416. cb(Qcur, "Qcur", il);
  4417. cb(Kcur, "Kcur", il);
  4418. cb(Vcur, "Vcur", il);
  4419. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4420. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4421. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4422. model.layers[il].wo, NULL,
  4423. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4424. cb(cur, "kqv_out", il);
  4425. }
  4426. // Add the input
  4427. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4428. cb(ffn_inp, "ffn_inp", il);
4429. // feed-forward network
  4430. {
  4431. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4432. model.layers[il].ffn_norm,
  4433. NULL,
  4434. LLM_NORM, cb, il);
  4435. cb(cur, "ffn_norm", il);
  4436. cur = llm_build_ffn(ctx0, cur,
  4437. model.layers[il].ffn_up, NULL,
  4438. NULL, NULL,
  4439. model.layers[il].ffn_down, NULL,
  4440. model.layers[il].ffn_act,
  4441. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4442. cb(cur, "ffn_out", il);
  4443. }
  4444. cur = ggml_add(ctx0, cur, ffn_inp);
  4445. cb(cur, "l_out", il);
  4446. // input for next layer
  4447. inpL = cur;
  4448. }
  4449. cur = inpL;
  4450. cur = llm_build_norm(ctx0, cur, hparams,
  4451. model.output_norm,
  4452. NULL,
  4453. LLM_NORM, cb, -1);
  4454. cb(cur, "result_norm", -1);
  4455. cur = ggml_mul_mat(ctx0, model.output, cur);
  4456. cb(cur, "result_output", -1);
  4457. ggml_build_forward_expand(gf, cur);
  4458. return gf;
  4459. }
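// StableLM: LayerNorm with biases, separate wq/wk/wv projections, partial RoPE over
// hparams.n_rot dims in NeoX mode (mode = 2), and a parallel SiLU-gated FFN; the
// K-cache shift in this builder also uses LLM_ROPE_NEOX with hparams.n_rot.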
  4460. struct ggml_cgraph * build_stablelm() {
  4461. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  4462. struct ggml_tensor * cur;
  4463. struct ggml_tensor * inpL;
  4464. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4465. cb(inpL, "inp_embd", -1);
  4466. // inp_pos - contains the positions
  4467. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4468. cb(inp_pos, "inp_pos", -1);
  4469. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4470. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4471. cb(KQ_mask, "KQ_mask", -1);
  4472. // shift the entire K-cache if needed
  4473. if (do_rope_shift) {
  4474. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, hparams.n_rot, freq_base, freq_scale, cb);
  4475. }
  4476. for (int il = 0; il < n_layer; ++il) {
  4477. struct ggml_tensor * inpSA = inpL;
  4478. // norm
  4479. cur = llm_build_norm(ctx0, inpL, hparams,
  4480. model.layers[il].attn_norm,
  4481. model.layers[il].attn_norm_b,
  4482. LLM_NORM, cb, il);
  4483. cb(cur, "attn_norm", il);
  4484. // self-attention
  4485. {
  4486. // compute Q and K and RoPE them
  4487. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4488. cb(Qcur, "Qcur", il);
  4489. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4490. cb(Kcur, "Kcur", il);
  4491. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4492. cb(Vcur, "Vcur", il);
  4493. Qcur = ggml_rope_custom(
  4494. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  4495. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4496. ext_factor, attn_factor, beta_fast, beta_slow
  4497. );
  4498. cb(Qcur, "Qcur", il);
  4499. Kcur = ggml_rope_custom(
  4500. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  4501. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4502. ext_factor, attn_factor, beta_fast, beta_slow
  4503. );
  4504. cb(Kcur, "Kcur", il);
  4505. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4506. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4507. model.layers[il].wo, NULL,
  4508. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4509. cb(cur, "kqv_out", il);
  4510. }
  4511. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4512. cb(ffn_inp, "ffn_inp", il);
  4513. // feed-forward network
  4514. {
  4515. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4516. model.layers[il].ffn_norm,
  4517. model.layers[il].ffn_norm_b,
  4518. LLM_NORM, cb, il);
  4519. cb(cur, "ffn_norm", il);
  4520. cur = llm_build_ffn(ctx0, cur,
  4521. model.layers[il].ffn_up, NULL,
  4522. model.layers[il].ffn_gate, NULL,
  4523. model.layers[il].ffn_down, NULL,
  4524. NULL,
  4525. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4526. cb(cur, "ffn_out", il);
  4527. }
  4528. cur = ggml_add(ctx0, cur, ffn_inp);
  4529. cb(cur, "l_out", il);
  4530. // input for next layer
  4531. inpL = cur;
  4532. }
  4533. cur = inpL;
  4534. cur = llm_build_norm(ctx0, cur, hparams,
  4535. model.output_norm,
  4536. model.output_norm_b,
  4537. LLM_NORM, cb, -1);
  4538. cb(cur, "result_norm", -1);
  4539. // lm_head
  4540. cur = ggml_mul_mat(ctx0, model.output, cur);
  4541. cb(cur, "result_output", -1);
  4542. ggml_build_forward_expand(gf, cur);
  4543. return gf;
  4544. }
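// Qwen: RMS-norm, fused wqkv + bqkv where Q, K and V are each n_embd wide (the views
// are split at offsets 0, n_embd and 2*n_embd), and NeoX RoPE applied over the full
// head dimension (n_embd_head), matching the K-shift parameters above the loop.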
  4545. struct ggml_cgraph * build_qwen() {
  4546. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4547. struct ggml_tensor * cur;
  4548. struct ggml_tensor * inpL;
  4549. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4550. cb(inpL, "inp_embd", -1);
  4551. // inp_pos - contains the positions
  4552. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4553. cb(inp_pos, "inp_pos", -1);
  4554. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4555. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4556. cb(KQ_mask, "KQ_mask", -1);
  4557. // shift the entire K-cache if needed
  4558. if (do_rope_shift) {
  4559. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4560. }
  4561. for (int il = 0; il < n_layer; ++il) {
  4562. struct ggml_tensor * inpSA = inpL;
  4563. cur = llm_build_norm(ctx0, inpL, hparams,
  4564. model.layers[il].attn_norm, NULL,
  4565. LLM_NORM_RMS, cb, il);
  4566. cb(cur, "attn_norm", il);
  4567. // self-attention
  4568. {
  4569. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4570. cb(cur, "wqkv", il);
  4571. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4572. cb(cur, "bqkv", il);
  4573. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4574. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4575. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
  4576. cb(Qcur, "Qcur", il);
  4577. cb(Kcur, "Kcur", il);
  4578. cb(Vcur, "Vcur", il);
  4579. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4580. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4581. // using mode = 2 for neox mode
  4582. Qcur = ggml_rope_custom(
  4583. ctx0, Qcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  4584. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4585. );
  4586. cb(Qcur, "Qcur", il);
  4587. Kcur = ggml_rope_custom(
  4588. ctx0, Kcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  4589. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4590. );
  4591. cb(Kcur, "Kcur", il);
  4592. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4593. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4594. model.layers[il].wo, NULL,
  4595. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4596. cb(cur, "kqv_out", il);
  4597. }
  4598. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4599. cb(ffn_inp, "ffn_inp", il);
4600. // feed-forward network
  4601. {
  4602. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4603. model.layers[il].ffn_norm, NULL,
  4604. LLM_NORM_RMS, cb, il);
  4605. cb(cur, "ffn_norm", il);
  4606. cur = llm_build_ffn(ctx0, cur,
  4607. model.layers[il].ffn_up, NULL,
  4608. model.layers[il].ffn_gate, NULL,
  4609. model.layers[il].ffn_down, NULL,
  4610. NULL,
  4611. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4612. cb(cur, "ffn_out", il);
  4613. }
  4614. cur = ggml_add(ctx0, cur, ffn_inp);
  4615. cb(cur, "l_out", il);
  4616. // input for next layer
  4617. inpL = cur;
  4618. }
  4619. cur = inpL;
  4620. cur = llm_build_norm(ctx0, cur, hparams,
  4621. model.output_norm, NULL,
  4622. LLM_NORM_RMS, cb, -1);
  4623. cb(cur, "result_norm", -1);
  4624. // lm_head
  4625. cur = ggml_mul_mat(ctx0, model.output, cur);
  4626. cb(cur, "result_output", -1);
  4627. ggml_build_forward_expand(gf, cur);
  4628. return gf;
  4629. }
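// Phi-2: attention and FFN both read the same attn_norm_output and their results are
// added to the residual together; Q is pre-scaled by 1/sqrt(n_embd_head) so
// llm_build_kqv is called with a kq_scale of 1.0f, and the lm_head adds an output
// bias (model.output_b) after the final matmul.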
  4630. struct ggml_cgraph * build_phi2() {
  4631. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4632. struct ggml_tensor * cur;
  4633. struct ggml_tensor * attn_norm_output;
  4634. struct ggml_tensor * ffn_output;
  4635. struct ggml_tensor * inpL;
  4636. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4637. cb(inpL, "inp_embd", -1);
  4638. // inp_pos - contains the positions
  4639. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4640. cb(inp_pos, "inp_pos", -1);
  4641. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4642. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4643. cb(KQ_mask, "KQ_mask", -1);
  4644. // shift the entire K-cache if needed
  4645. if (do_rope_shift) {
  4646. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4647. }
  4648. for (int il = 0; il < n_layer; ++il) {
  4649. attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
  4650. model.layers[il].attn_norm,
  4651. model.layers[il].attn_norm_b,
  4652. LLM_NORM, cb, il);
  4653. cb(attn_norm_output, "attn_norm", il);
  4654. // self-attention
  4655. {
  4656. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
  4657. cb(cur, "wqkv", il);
  4658. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4659. cb(cur, "bqkv", il);
  4660. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4661. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4662. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4663. cb(Qcur, "Qcur", il);
  4664. cb(Kcur, "Kcur", il);
  4665. cb(Vcur, "Vcur", il);
  4666. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4667. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4668. Qcur = ggml_rope_custom(
  4669. ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4670. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4671. );
  4672. cb(Qcur, "Qcur", il);
  4673. // with phi2, we scale the Q to avoid precision issues
  4674. // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
  4675. Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
  4676. cb(Qcur, "Qcur", il);
  4677. Kcur = ggml_rope_custom(
  4678. ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4679. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4680. );
  4681. cb(Kcur, "Kcur", il);
  4682. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4683. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4684. model.layers[il].wo, model.layers[il].bo,
  4685. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f, cb, il);
  4686. cb(cur, "kqv_out", il);
  4687. }
  4688. // FF
  4689. {
  4690. ffn_output = llm_build_ffn(ctx0, attn_norm_output,
  4691. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4692. NULL, NULL,
  4693. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4694. NULL,
  4695. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4696. cb(ffn_output, "ffn_out", il);
  4697. }
  4698. cur = ggml_add(ctx0, cur, ffn_output);
  4699. cb(cur, "l_out", il);
  4700. cur = ggml_add(ctx0, cur, inpL);
  4701. cb(cur, "l_out", il);
  4702. inpL = cur;
  4703. }
  4704. cur = llm_build_norm(ctx0, inpL, hparams,
  4705. model.output_norm,
  4706. model.output_norm_b,
  4707. LLM_NORM, cb, -1);
  4708. cb(cur, "result_norm", -1);
  4709. cur = ggml_mul_mat(ctx0, model.output, cur);
  4710. cb(cur, "result_output_no_bias", -1);
  4711. cur = ggml_add(ctx0, cur, model.output_b);
  4712. cb(cur, "result_output", -1);
  4713. ggml_build_forward_expand(gf, cur);
  4714. return gf;
  4715. }
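// PLaMo: as in Phi-2, the FFN runs on the attention-norm output rather than on a
// post-attention norm, and each layer output is the sum of the attention output, the
// FFN output and the layer input (two residual adds at the end of the loop).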
  4716. struct ggml_cgraph * build_plamo() {
  4717. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  4718. struct ggml_tensor * cur;
  4719. struct ggml_tensor * inpL;
  4720. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4721. cb(inpL, "inp_embd", -1);
  4722. // inp_pos - contains the positions
  4723. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4724. cb(inp_pos, "inp_pos", -1);
  4725. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4726. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4727. cb(KQ_mask, "KQ_mask", -1);
  4728. // shift the entire K-cache if needed
  4729. if (do_rope_shift) {
  4730. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4731. }
  4732. for (int il = 0; il < n_layer; ++il) {
  4733. // norm
  4734. cur = llm_build_norm(ctx0, inpL, hparams,
  4735. model.layers[il].attn_norm, NULL,
  4736. LLM_NORM_RMS, cb, il);
  4737. cb(cur, "attn_norm", il);
  4738. struct ggml_tensor * attention_norm = cur;
  4739. // self-attention
  4740. {
  4741. // compute Q and K and RoPE them
  4742. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4743. cb(Qcur, "Qcur", il);
  4744. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4745. cb(Kcur, "Kcur", il);
  4746. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4747. cb(Vcur, "Vcur", il);
  4748. Qcur = ggml_rope_custom(
  4749. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  4750. n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4751. ext_factor, attn_factor, beta_fast, beta_slow);
  4752. cb(Qcur, "Qcur", il);
  4753. Kcur = ggml_rope_custom(
  4754. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  4755. n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4756. ext_factor, attn_factor, beta_fast, beta_slow);
  4757. cb(Kcur, "Kcur", il);
  4758. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4759. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4760. model.layers[il].wo, NULL,
  4761. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4762. cb(cur, "kqv_out", il);
  4763. }
  4764. struct ggml_tensor * sa_out = cur;
  4765. cur = attention_norm;
  4766. // feed-forward network
  4767. {
  4768. cur = llm_build_ffn(ctx0, cur,
  4769. model.layers[il].ffn_up, NULL,
  4770. model.layers[il].ffn_gate, NULL,
  4771. model.layers[il].ffn_down, NULL,
  4772. NULL,
  4773. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4774. cb(cur, "ffn_out", il);
  4775. }
  4776. cur = ggml_add(ctx0, cur, sa_out);
  4777. cb(cur, "l_out", il);
  4778. cur = ggml_add(ctx0, cur, inpL);
  4779. cb(cur, "l_out", il);
  4780. // input for next layer
  4781. inpL = cur;
  4782. }
  4783. cur = inpL;
  4784. cur = llm_build_norm(ctx0, cur, hparams,
  4785. model.output_norm, NULL,
  4786. LLM_NORM_RMS, cb, -1);
  4787. cb(cur, "result_norm", -1);
  4788. // lm_head
  4789. cur = ggml_mul_mat(ctx0, model.output, cur);
  4790. cb(cur, "result_output", -1);
  4791. ggml_build_forward_expand(gf, cur);
  4792. return gf;
  4793. }
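// GPT-2: learned position embeddings (model.pos_embd, gathered via inp_pos) are added
// to the token embeddings before the layer loop, attention uses fused wqkv/bqkv
// without RoPE, and the FFN is a sequential GELU MLP with biases.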
  4794. struct ggml_cgraph * build_gpt2() {
  4795. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4796. struct ggml_tensor * cur;
  4797. struct ggml_tensor * pos;
  4798. struct ggml_tensor * inpL;
  4799. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4800. cb(inpL, "inp_embd", -1);
  4801. // inp_pos - contains the positions
  4802. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4803. cb(inp_pos, "inp_pos", -1);
  4804. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4805. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4806. cb(KQ_mask, "KQ_mask", -1);
  4807. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  4808. cb(pos, "pos_embd", -1);
  4809. inpL = ggml_add(ctx0, inpL, pos);
  4810. cb(inpL, "inpL", -1);
  4811. for (int il = 0; il < n_layer; ++il) {
  4812. cur = llm_build_norm(ctx0, inpL, hparams,
  4813. model.layers[il].attn_norm,
  4814. model.layers[il].attn_norm_b,
  4815. LLM_NORM, cb, il);
  4816. cb(cur, "attn_norm", il);
  4817. // self-attention
  4818. {
  4819. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4820. cb(cur, "wqkv", il);
  4821. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4822. cb(cur, "bqkv", il);
  4823. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4824. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4825. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4826. cb(Qcur, "Qcur", il);
  4827. cb(Kcur, "Kcur", il);
  4828. cb(Vcur, "Vcur", il);
  4829. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4830. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4831. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4832. model.layers[il].wo, model.layers[il].bo,
  4833. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4834. cb(cur, "kqv_out", il);
  4835. }
  4836. // add the input
  4837. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4838. cb(ffn_inp, "ffn_inp", il);
  4839. // FF
  4840. {
  4841. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4842. model.layers[il].ffn_norm,
  4843. model.layers[il].ffn_norm_b,
  4844. LLM_NORM, cb, il);
  4845. cb(cur, "ffn_norm", il);
  4846. cur = llm_build_ffn(ctx0, cur,
  4847. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4848. NULL, NULL,
  4849. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4850. NULL,
  4851. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4852. cb(cur, "ffn_out", il);
  4853. }
  4854. inpL = ggml_add(ctx0, cur, ffn_inp);
  4855. cb(inpL, "l_out", il);
  4856. }
  4857. cur = llm_build_norm(ctx0, inpL, hparams,
  4858. model.output_norm,
  4859. model.output_norm_b,
  4860. LLM_NORM, cb, -1);
  4861. cb(cur, "result_norm", -1);
  4862. cur = ggml_mul_mat(ctx0, model.output, cur);
  4863. cb(cur, "result_output", -1);
  4864. ggml_build_forward_expand(gf, cur);
  4865. return gf;
  4866. }
  4867. };
  4868. //
  4869. // tensor offloading helpers
  4870. //
  4871. // TODO: will be removed with backend v2
  4872. enum llm_offload_func_e {
  4873. OFFLOAD_FUNC_NOP,
  4874. OFFLOAD_FUNC,
  4875. OFFLOAD_FUNC_FRC, // force offload
  4876. OFFLOAD_FUNC_KQV,
  4877. OFFLOAD_FUNC_NR,
  4878. OFFLOAD_FUNC_EMB, // embeddings
  4879. OFFLOAD_FUNC_OUT,
  4880. };
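// The trie below maps tensor names (the base names passed to the cb() callback by the
// graph builders) to an offload function. Lookup walks one byte of the name per node
// and falls back to OFFLOAD_FUNC_NOP (keep on CPU) for unknown names.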
  4881. // TODO: will be removed with backend v2
  4882. struct llm_offload_trie {
  4883. struct node {
  4884. ~node() {
  4885. for (int i = 0; i < 256; ++i) {
  4886. if (children[i]) {
  4887. delete children[i];
  4888. }
  4889. }
  4890. }
  4891. node * children[256] = { nullptr };
  4892. llm_offload_func_e func = OFFLOAD_FUNC_NOP;
  4893. };
  4894. llm_offload_trie() {
  4895. root = new node;
  4896. }
  4897. llm_offload_trie(const std::unordered_map<const char *, llm_offload_func_e> & map) {
  4898. root = new node;
  4899. for (const auto & kv : map) {
  4900. add(kv.first, kv.second);
  4901. }
  4902. }
  4903. ~llm_offload_trie() {
  4904. delete root;
  4905. }
  4906. void add(const char * name, llm_offload_func_e func) {
  4907. node * cur = root;
  4908. for (int i = 0; ; ++i) {
  4909. const uint8_t c = name[i];
  4910. if (!c) {
  4911. break;
  4912. }
  4913. if (!cur->children[c]) {
  4914. cur->children[c] = new node;
  4915. }
  4916. cur = cur->children[c];
  4917. }
  4918. cur->func = func;
  4919. }
  4920. llm_offload_func_e find(const char * name) const {
  4921. const node * cur = root;
  4922. for (int i = 0; ; ++i) {
  4923. const uint8_t c = name[i];
  4924. if (!c) {
  4925. break;
  4926. }
  4927. if (!cur->children[c]) {
  4928. return OFFLOAD_FUNC_NOP;
  4929. }
  4930. cur = cur->children[c];
  4931. }
  4932. return cur->func;
  4933. }
  4934. node * root = nullptr;
  4935. };
  4936. // TODO: will be removed with backend v2
  4937. static const std::unordered_map<const char *, llm_offload_func_e> k_offload_map = {
  4938. //{ "inp_tokens", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
  4939. //{ "inp_embd", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
  4940. { "pos_embd", OFFLOAD_FUNC_NR },
  4941. { "inp_pos", OFFLOAD_FUNC_FRC }, // this is often used for KQ ops (e.g. rope)
  4942. { "KQ_mask", OFFLOAD_FUNC_FRC },
  4943. { "K_shift", OFFLOAD_FUNC_FRC },
  4944. { "K_shifted", OFFLOAD_FUNC },
  4945. { "inp_norm", OFFLOAD_FUNC_NR },
  4946. { "inp_norm_w", OFFLOAD_FUNC_NR },
  4947. { "inp_norm_wb", OFFLOAD_FUNC_NR },
  4948. { "norm", OFFLOAD_FUNC },
  4949. { "norm_w", OFFLOAD_FUNC },
  4950. { "norm_wb", OFFLOAD_FUNC },
  4951. { "attn_norm", OFFLOAD_FUNC },
  4952. { "attn_norm_2", OFFLOAD_FUNC },
  4953. { "wqkv", OFFLOAD_FUNC_KQV },
  4954. { "bqkv", OFFLOAD_FUNC_KQV },
  4955. { "wqkv_clamped", OFFLOAD_FUNC_KQV },
  4956. { "tmpk", OFFLOAD_FUNC_KQV },
  4957. { "tmpq", OFFLOAD_FUNC_KQV },
  4958. { "tmpv", OFFLOAD_FUNC_KQV },
  4959. { "Kcur", OFFLOAD_FUNC_KQV },
  4960. { "Qcur", OFFLOAD_FUNC_KQV },
  4961. { "Vcur", OFFLOAD_FUNC_KQV },
  4962. { "krot", OFFLOAD_FUNC_KQV },
  4963. { "qrot", OFFLOAD_FUNC_KQV },
  4964. { "kpass", OFFLOAD_FUNC_KQV },
  4965. { "qpass", OFFLOAD_FUNC_KQV },
  4966. { "krotated", OFFLOAD_FUNC_KQV },
  4967. { "qrotated", OFFLOAD_FUNC_KQV },
  4968. { "q", OFFLOAD_FUNC_KQV },
  4969. { "k", OFFLOAD_FUNC_KQV },
  4970. { "kq", OFFLOAD_FUNC_KQV },
  4971. { "kq_scaled", OFFLOAD_FUNC_KQV },
  4972. { "kq_scaled_alibi", OFFLOAD_FUNC_KQV },
  4973. { "kq_masked", OFFLOAD_FUNC_KQV },
  4974. { "kq_soft_max", OFFLOAD_FUNC_KQV },
  4975. { "kq_soft_max_ext", OFFLOAD_FUNC_KQV },
  4976. { "v", OFFLOAD_FUNC_KQV },
  4977. { "kqv", OFFLOAD_FUNC_KQV },
  4978. { "kqv_merged", OFFLOAD_FUNC_KQV },
  4979. { "kqv_merged_cont", OFFLOAD_FUNC_KQV },
  4980. { "kqv_wo", OFFLOAD_FUNC_KQV },
  4981. { "kqv_out", OFFLOAD_FUNC_KQV },
  4982. { "ffn_inp", OFFLOAD_FUNC },
  4983. { "ffn_norm", OFFLOAD_FUNC },
  4984. { "ffn_up", OFFLOAD_FUNC },
  4985. { "ffn_up_b", OFFLOAD_FUNC },
  4986. { "ffn_gate", OFFLOAD_FUNC },
  4987. { "ffn_gate_b", OFFLOAD_FUNC },
  4988. { "ffn_gate_par", OFFLOAD_FUNC },
  4989. { "ffn_act", OFFLOAD_FUNC },
  4990. { "ffn_down", OFFLOAD_FUNC },
  4991. { "ffn_down_b", OFFLOAD_FUNC },
  4992. { "ffn_out", OFFLOAD_FUNC },
  4993. { "ffn_silu", OFFLOAD_FUNC },
  4994. { "ffn_gelu", OFFLOAD_FUNC },
  4995. { "ffn_relu", OFFLOAD_FUNC },
  4996. { "ffn_sqr(relu)", OFFLOAD_FUNC },
  4997. { "ffn_moe_logits", OFFLOAD_FUNC },
  4998. { "ffn_moe_probs", OFFLOAD_FUNC },
  4999. { "ffn_moe_argsort", OFFLOAD_FUNC },
  5000. { "ffn_moe_weights", OFFLOAD_FUNC },
  5001. { "ffn_moe_weights_sum", OFFLOAD_FUNC },
  5002. { "ffn_moe_weights_norm", OFFLOAD_FUNC },
  5003. { "ffn_moe_weighted", OFFLOAD_FUNC },
  5004. { "ffn_moe_up", OFFLOAD_FUNC },
  5005. { "ffn_moe_gate", OFFLOAD_FUNC },
  5006. { "ffn_moe_silu", OFFLOAD_FUNC },
  5007. { "ffn_moe_gate_par", OFFLOAD_FUNC },
  5008. { "ffn_moe_down", OFFLOAD_FUNC },
  5009. { "ffn_moe_out", OFFLOAD_FUNC },
  5010. { "l_out", OFFLOAD_FUNC },
  5011. { "result_norm", OFFLOAD_FUNC_EMB },
  5012. { "result_output_no_bias", OFFLOAD_FUNC_EMB },
  5013. { "result_output", OFFLOAD_FUNC_OUT },
  5014. };
  5015. static llm_offload_trie k_offload_func_trie(k_offload_map);
  5016. static struct ggml_cgraph * llama_build_graph(
  5017. llama_context & lctx,
  5018. const llama_batch & batch) {
  5019. const auto & model = lctx.model;
  5020. // check if we should build the worst-case graph (for memory measurement)
  5021. const bool worst_case = ggml_allocr_is_measure(lctx.alloc);
  5022. // keep track of the input that has already been allocated
  5023. bool alloc_inp_tokens = false;
  5024. bool alloc_inp_embd = false;
  5025. bool alloc_inp_pos = false;
  5026. bool alloc_inp_KQ_mask = false;
  5027. bool alloc_inp_K_shift = false;
  5028. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  5029. const bool do_offload = true;
  5030. #else
  5031. const bool do_offload = true; // TODO: set to false after finishing refactoring
  5032. #endif
  5033. int n_non_view = 0; // number of non-view tensors that have been processed by the callback
  5034. // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
  5035. // TODO: will be removed with backend v2
  5036. llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
  5037. if (il >= 0) {
  5038. ggml_format_name(cur, "%s-%d", name, il);
  5039. } else {
  5040. ggml_set_name(cur, name);
  5041. }
  5042. //
  5043. // allocate input tensors and set input data
  5044. //
  5045. // TODO: will be removed with backend v2
  5046. if (!alloc_inp_tokens && strcmp(name, "inp_tokens") == 0) {
  5047. ggml_allocr_alloc(lctx.alloc, cur);
  5048. if (!ggml_allocr_is_measure(lctx.alloc) && batch.token) {
  5049. const int64_t n_tokens = cur->ne[0];
  5050. ggml_backend_tensor_set(cur, batch.token, 0, n_tokens*ggml_element_size(cur));
  5051. }
  5052. alloc_inp_tokens = true;
  5053. }
  5054. if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0) {
  5055. ggml_allocr_alloc(lctx.alloc, cur);
  5056. if (!ggml_allocr_is_measure(lctx.alloc) && batch.embd) {
  5057. const int64_t n_embd = cur->ne[0];
  5058. const int64_t n_tokens = cur->ne[1];
  5059. ggml_backend_tensor_set(cur, batch.embd, 0, n_tokens*n_embd*ggml_element_size(cur));
  5060. }
  5061. alloc_inp_embd = true;
  5062. }
  5063. if (!alloc_inp_pos && strcmp(name, "inp_pos") == 0) {
  5064. ggml_allocr_alloc(lctx.alloc, cur);
  5065. if (!ggml_allocr_is_measure(lctx.alloc) && batch.pos) {
  5066. const int64_t n_tokens = cur->ne[0];
  5067. static_assert(std::is_same<llama_pos, int32_t>::value, "llama_pos must be int32_t");
  5068. ggml_backend_tensor_set(cur, batch.pos, 0, n_tokens*ggml_element_size(cur));
  5069. }
  5070. alloc_inp_pos = true;
  5071. }
  5072. if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) {
  5073. ggml_allocr_alloc(lctx.alloc, cur);
  5074. if (!ggml_allocr_is_measure(lctx.alloc)) {
  5075. const int64_t n_kv = cur->ne[0];
  5076. const int64_t n_tokens = cur->ne[1];
  5077. float * data;
  5078. if (ggml_backend_buffer_is_host(cur->buffer)) {
  5079. data = (float *) cur->data;
  5080. } else {
  5081. lctx.buf_copy.resize(ggml_nbytes(cur));
  5082. data = (float *) lctx.buf_copy.data();
  5083. }
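// causal mask per sequence: for token j, any KV cell that does not belong to
// seq_id[j] or holds a position greater than pos[j] gets -INFINITY, everything else
// gets 0; the single h == 0 plane is later broadcast across all heads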
  5084. for (int h = 0; h < 1; ++h) {
  5085. for (int j = 0; j < n_tokens; ++j) {
  5086. const llama_pos pos = batch.pos[j];
  5087. const llama_seq_id seq_id = batch.seq_id[j][0];
  5088. for (int i = 0; i < n_kv; ++i) {
  5089. float f;
  5090. if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
  5091. f = -INFINITY;
  5092. } else {
  5093. f = 0;
  5094. }
  5095. data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
  5096. }
  5097. }
  5098. }
  5099. if (data != cur->data) {
  5100. ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));
  5101. }
  5102. }
  5103. alloc_inp_KQ_mask = true;
  5104. }
  5105. if (!alloc_inp_K_shift && strcmp(name, "K_shift") == 0) {
  5106. ggml_allocr_alloc(lctx.alloc, cur);
  5107. if (!ggml_allocr_is_measure(lctx.alloc)) {
  5108. const int64_t n_ctx = cur->ne[0];
  5109. int32_t * data;
  5110. if (ggml_backend_buffer_is_host(cur->buffer)) {
  5111. data = (int32_t *) cur->data;
  5112. } else {
  5113. lctx.buf_copy.resize(ggml_nbytes(cur));
  5114. data = (int32_t *) lctx.buf_copy.data();
  5115. }
  5116. for (int i = 0; i < n_ctx; ++i) {
  5117. data[i] = lctx.kv_self.cells[i].delta;
  5118. }
  5119. if (data != cur->data) {
  5120. ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));
  5121. }
  5122. }
  5123. alloc_inp_K_shift = true;
  5124. }
  5125. // view tensors are not processed further
  5126. if (cur->view_src != nullptr) {
  5127. return;
  5128. }
  5129. if (cur->op != GGML_OP_NONE) {
  5130. n_non_view++;
  5131. }
  5132. //
  5133. // offload layers
  5134. //
  5135. // TODO: will be removed with backend v2
  5136. //#define LLAMA_OFFLOAD_DEBUG
  5137. if (!do_offload) {
  5138. return;
  5139. }
  5140. const int n_layer = model.hparams.n_layer;
  5141. const int n_gpu_layers = model.n_gpu_layers;
  5142. const int i_gpu_start = n_layer - n_gpu_layers;
  5143. // should we offload the final norm? yes if we are not computing embeddings
  5144. const bool offload_emb = lctx.embedding.empty();
  5145. static const std::unordered_map<llm_offload_func_e, std::string, std::hash<int>> k_offload_func_name = {
  5146. { OFFLOAD_FUNC_NOP, "CPU" },
  5147. { OFFLOAD_FUNC_OUT, "CPU" },
  5148. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  5149. { OFFLOAD_FUNC, "GPU (CUDA)" },
  5150. { OFFLOAD_FUNC_FRC, "GPU (CUDA) FRC" },
  5151. { OFFLOAD_FUNC_KQV, "GPU (CUDA) KQV" },
  5152. { OFFLOAD_FUNC_NR, "GPU (CUDA) NR" },
  5153. { OFFLOAD_FUNC_EMB, "GPU (CUDA) EMB" },
  5154. #else
  5155. { OFFLOAD_FUNC, "CPU" },
  5156. { OFFLOAD_FUNC_FRC, "CPU" },
  5157. { OFFLOAD_FUNC_KQV, "CPU" },
  5158. { OFFLOAD_FUNC_NR, "CPU" },
  5159. { OFFLOAD_FUNC_EMB, "CPU" },
  5160. #endif // GGML_USE_CUBLAS
  5161. };
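// note: the lookup below uses the base name passed to cb(), not cur->name, so the
// "-<il>" suffix appended by ggml_format_name does not affect matching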
  5162. // check the global map for what offload function to use for this tensor
  5163. llm_offload_func_e func_e = k_offload_func_trie.find(name);
  5164. if (func_e == OFFLOAD_FUNC_NOP) {
  5165. #ifdef LLAMA_OFFLOAD_DEBUG
  5166. // if a tensor hasn't been offloaded, we warn the user
  5167. if (worst_case) {
  5168. LLAMA_LOG_WARN("%s: %32s: not offloaded (ref: %s)\n", __func__,
  5169. cur->name, "https://github.com/ggerganov/llama.cpp/pull/3837");
  5170. }
  5171. #endif
  5172. return;
  5173. }
  5174. // count the number of layers and respect the provided n_gpu_layers
  5175. switch (func_e) {
  5176. case OFFLOAD_FUNC_NOP:
  5177. case OFFLOAD_FUNC_OUT:
  5178. break;
  5179. case OFFLOAD_FUNC:
  5180. if (n_gpu_layers < n_layer) {
  5181. if (il < i_gpu_start) {
  5182. func_e = OFFLOAD_FUNC_NOP;
  5183. }
  5184. }
  5185. break;
  5186. case OFFLOAD_FUNC_FRC:
  5187. if (!lctx.cparams.offload_kqv) {
  5188. func_e = OFFLOAD_FUNC_NOP;
  5189. } break;
  5190. case OFFLOAD_FUNC_KQV:
  5191. if (!lctx.cparams.offload_kqv) {
  5192. func_e = OFFLOAD_FUNC_NOP;
  5193. } else {
  5194. if (n_gpu_layers < n_layer) {
  5195. if (il < i_gpu_start) {
  5196. func_e = OFFLOAD_FUNC_NOP;
  5197. }
  5198. }
  5199. }
  5200. break;
  5201. case OFFLOAD_FUNC_NR:
  5202. if (n_gpu_layers <= n_layer + 0) {
  5203. func_e = OFFLOAD_FUNC_NOP;
  5204. }
  5205. break;
  5206. case OFFLOAD_FUNC_EMB:
  5207. if (!offload_emb || n_gpu_layers < n_layer) {
  5208. func_e = OFFLOAD_FUNC_NOP;
  5209. }
  5210. break;
  5211. default: GGML_ASSERT(false);
  5212. }
  5213. offload_func_t func = ggml_offload_nop;
  5214. // this is needed for compatibility with Metal for example
  5215. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  5216. static offload_func_t ggml_offload_gpu = ggml_cuda_assign_buffers_no_alloc;
  5217. #else
  5218. static offload_func_t ggml_offload_gpu = ggml_offload_nop;
  5219. #endif
  5220. switch (func_e) {
  5221. case OFFLOAD_FUNC_NOP:
  5222. case OFFLOAD_FUNC_OUT: func = ggml_offload_nop; break;
  5223. case OFFLOAD_FUNC:
  5224. case OFFLOAD_FUNC_KQV:
  5225. case OFFLOAD_FUNC_FRC:
  5226. case OFFLOAD_FUNC_NR:
  5227. case OFFLOAD_FUNC_EMB: func = ggml_offload_gpu; break;
  5228. default: GGML_ASSERT(false);
  5229. }
  5230. // apply offload function to the tensor
  5231. func(cur);
  5232. #ifdef LLAMA_OFFLOAD_DEBUG
  5233. if (worst_case) {
  5234. LLAMA_LOG_INFO("%s: %32s: %s\n", __func__, cur->name, k_offload_func_name.at(func_e).c_str());
  5235. }
  5236. #endif
  5237. };
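// build the graph for the selected architecture; every builder ends by expanding a
// tensor named "result_output", which llama_decode_internal later retrieves as the
// last node of the graph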
  5238. struct ggml_cgraph * result = NULL;
  5239. struct llm_build_context llm(lctx, batch, cb, worst_case);
  5240. llm.init();
  5241. switch (model.arch) {
  5242. case LLM_ARCH_LLAMA:
  5243. {
  5244. result = llm.build_llama();
  5245. } break;
  5246. case LLM_ARCH_BAICHUAN:
  5247. {
  5248. result = llm.build_baichuan();
  5249. } break;
  5250. case LLM_ARCH_FALCON:
  5251. {
  5252. result = llm.build_falcon();
  5253. } break;
  5254. case LLM_ARCH_STARCODER:
  5255. {
  5256. result = llm.build_starcoder();
  5257. } break;
  5258. case LLM_ARCH_PERSIMMON:
  5259. {
  5260. result = llm.build_persimmon();
  5261. } break;
  5262. case LLM_ARCH_REFACT:
  5263. {
  5264. result = llm.build_refact();
  5265. } break;
  5266. case LLM_ARCH_BLOOM:
  5267. {
  5268. result = llm.build_bloom();
  5269. } break;
  5270. case LLM_ARCH_MPT:
  5271. {
  5272. result = llm.build_mpt();
  5273. } break;
  5274. case LLM_ARCH_STABLELM:
  5275. {
  5276. result = llm.build_stablelm();
  5277. } break;
  5278. case LLM_ARCH_QWEN:
  5279. {
  5280. result = llm.build_qwen();
  5281. } break;
  5282. case LLM_ARCH_PHI2:
  5283. {
  5284. result = llm.build_phi2();
  5285. } break;
  5286. case LLM_ARCH_PLAMO:
  5287. {
  5288. result = llm.build_plamo();
  5289. } break;
  5290. case LLM_ARCH_GPT2:
  5291. {
  5292. result = llm.build_gpt2();
  5293. } break;
  5294. default:
  5295. GGML_ASSERT(false);
  5296. }
  5297. llm.free();
  5298. if (worst_case) {
  5299. int n_non_view_total = 0;
  5300. for (int i = 0; i < result->n_nodes; ++i) {
  5301. if (result->nodes[i]->view_src == nullptr) {
  5302. n_non_view_total++;
  5303. }
  5304. }
  5305. LLAMA_LOG_INFO("%s: non-view tensors processed: %d/%d\n", __func__, n_non_view, n_non_view_total);
  5306. if (n_non_view != n_non_view_total) {
  5307. LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__);
  5308. LLAMA_LOG_WARN("%s: not all non-view tensors have been processed with a callback\n", __func__);
  5309. LLAMA_LOG_WARN("%s: this can indicate an inefficiency in the graph implementation\n", __func__);
  5310. LLAMA_LOG_WARN("%s: build with LLAMA_OFFLOAD_DEBUG for more info\n", __func__);
  5311. LLAMA_LOG_WARN("%s: ref: https://github.com/ggerganov/llama.cpp/pull/3837\n", __func__);
  5312. LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__);
  5313. }
  5314. }
  5315. return result;
  5316. }
  5317. // decode a batch of tokens by evaluating the transformer
  5318. //
  5319. // - lctx: llama context
  5320. // - batch: batch to evaluate
  5321. //
  5322. // return 0 on success
  5323. // return positive int on warning
  5324. // return negative int on error
  5325. //
  5326. static int llama_decode_internal(
  5327. llama_context & lctx,
  5328. llama_batch batch) {
  5329. const uint32_t n_tokens = batch.n_tokens;
  5330. if (n_tokens == 0) {
  5331. LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
  5332. return -1;
  5333. }
  5334. const auto & model = lctx.model;
  5335. const auto & hparams = model.hparams;
  5336. const auto & cparams = lctx.cparams;
  5337. const auto n_batch = cparams.n_batch;
  5338. GGML_ASSERT(n_tokens <= n_batch);
  5339. int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
  5340. GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
  5341. const int64_t t_start_us = ggml_time_us();
  5342. #ifdef GGML_USE_MPI
  5343. // TODO: needs fix after #3228
  5344. GGML_ASSERT(false && "not implemented");
  5345. //ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
  5346. #endif
  5347. GGML_ASSERT(n_threads > 0);
  5348. auto & kv_self = lctx.kv_self;
  5349. GGML_ASSERT(!!kv_self.ctx);
  5350. const int64_t n_embd = hparams.n_embd;
  5351. const int64_t n_vocab = hparams.n_vocab;
  5352. // helpers for smoother batch API transition
  5353. // after deprecating the llama_eval calls, these will be removed
  5354. std::vector<llama_pos> pos;
  5355. std::vector<int32_t> n_seq_id;
  5356. std::vector<llama_seq_id *> seq_id_arr;
  5357. std::vector<std::vector<llama_seq_id>> seq_id;
  5358. if (batch.pos == nullptr) {
  5359. pos.resize(n_tokens);
  5360. for (uint32_t i = 0; i < n_tokens; i++) {
  5361. pos[i] = batch.all_pos_0 + i*batch.all_pos_1;
  5362. }
  5363. batch.pos = pos.data();
  5364. }
  5365. if (batch.seq_id == nullptr) {
  5366. n_seq_id.resize(n_tokens);
  5367. seq_id.resize(n_tokens);
  5368. seq_id_arr.resize(n_tokens);
  5369. for (uint32_t i = 0; i < n_tokens; i++) {
  5370. n_seq_id[i] = 1;
  5371. seq_id[i].resize(1);
  5372. seq_id[i][0] = batch.all_seq_id;
  5373. seq_id_arr[i] = seq_id[i].data();
  5374. }
  5375. batch.n_seq_id = n_seq_id.data();
  5376. batch.seq_id = seq_id_arr.data();
  5377. }
  5378. // if we have enough unused cells before the current head ->
  5379. // better to start searching from the beginning of the cache, hoping to fill it
  5380. if (kv_self.head > kv_self.used + 2*n_tokens) {
  5381. kv_self.head = 0;
  5382. }
  5383. if (!llama_kv_cache_find_slot(kv_self, batch)) {
  5384. return 1;
  5385. }
  5386. // a heuristic, to avoid attending the full cache if it is not yet utilized
  5387. // after enough generations, the benefit from this heuristic disappears
  5388. // if we start defragmenting the cache, the benefit from this will be more important
  5389. kv_self.n = std::min((int32_t) cparams.n_ctx, std::max(32, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)));
  5390. //kv_self.n = llama_kv_cache_cell_max(kv_self);
  5391. //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);
  5392. ggml_allocr_reset(lctx.alloc);
  5393. ggml_cgraph * gf = llama_build_graph(lctx, batch);
  5394. ggml_allocr_alloc_graph(lctx.alloc, gf);
  5395. // the output is always the last tensor in the graph
  5396. struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
  5397. GGML_ASSERT(strcmp(res->name, "result_output") == 0);
  5398. // the embeddings could be the second to last tensor, or the third to last tensor
  5399. struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
  5400. if (strcmp(embeddings->name, "result_norm") != 0) {
  5401. embeddings = gf->nodes[gf->n_nodes - 3];
  5402. GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
  5403. }
  5404. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  5405. char * buf_alloc_base = (char *)ggml_backend_buffer_get_base(lctx.buf_alloc);
  5406. for (int i = 0; i < gf->n_leafs; i++) {
  5407. ggml_tensor * node = gf->leafs[i];
  5408. if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
  5409. ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base);
  5410. ggml_cuda_copy_to_device(node);
  5411. }
  5412. }
  5413. for (int i = 0; i < gf->n_nodes; i++) {
  5414. ggml_tensor * node = gf->nodes[i];
  5415. if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
  5416. ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base);
  5417. }
  5418. }
  5419. // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
  5420. if (!lctx.embedding.empty()) {
  5421. embeddings->backend = GGML_BACKEND_CPU;
  5422. }
  5423. res->backend = GGML_BACKEND_CPU;
  5424. #endif
  5425. // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
5426. // for big prompts, if BLAS is enabled, it is better to use only one thread
5427. // otherwise, the threads spin-lock waiting for the BLAS calls and degrade performance
5428. // TODO: this is mostly important for Apple Silicon, where CBLAS still performs very well
5429. // we still need some threads to process all non-mul_mat ops, but not too many, to avoid interfering
5430. // with the BLAS calls. need a better solution
  5431. if (n_tokens >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
  5432. n_threads = std::min(4, n_threads);
  5433. }
  5434. const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 1;
  5435. if (ggml_cpu_has_cublas() && fully_offloaded) {
  5436. n_threads = 1;
  5437. }
  5438. #ifdef GGML_USE_MPI
  5439. const int64_t n_layer = hparams.n_layer;
  5440. ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
  5441. #endif
  5442. #ifdef GGML_USE_METAL
  5443. if (ggml_backend_is_metal(lctx.backend)) {
  5444. ggml_backend_metal_set_n_cb(lctx.backend, n_threads);
  5445. }
  5446. #endif
  5447. if (ggml_backend_is_cpu(lctx.backend)) {
  5448. ggml_backend_cpu_set_n_threads(lctx.backend, n_threads);
  5449. }
  5450. ggml_backend_graph_compute(lctx.backend, gf);
  5451. #ifdef GGML_USE_MPI
  5452. ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
  5453. #endif
  5454. // update the kv ring buffer
  5455. {
  5456. if (kv_self.has_shift) {
  5457. kv_self.has_shift = false;
  5458. for (uint32_t i = 0; i < kv_self.size; ++i) {
  5459. kv_self.cells[i].delta = 0;
  5460. }
  5461. }
  5462. kv_self.head += n_tokens;
  5463. // Ensure kv cache head points to a valid index.
  5464. if (kv_self.head >= kv_self.size) {
  5465. kv_self.head = 0;
  5466. }
  5467. }
  5468. #ifdef GGML_PERF
  5469. // print timing information per ggml operation (for debugging purposes)
  5470. // requires GGML_PERF to be defined
  5471. ggml_graph_print(gf);
  5472. #endif
  5473. // plot the computation graph in dot format (for debugging purposes)
  5474. //if (n_past%100 == 0) {
  5475. // ggml_graph_dump_dot(gf, NULL, "llama.dot");
  5476. //}
  5477. // extract logits
  5478. // TODO: do not compute and extract logits if only embeddings are needed
  5479. // need to update the graphs to skip "result_output"
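// three cases below: per-token logits selected by batch.logits, all logits when
// lctx.logits_all is set, or only the last token's logits otherwise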
  5480. {
  5481. auto & logits_out = lctx.logits;
  5482. #ifndef NDEBUG
  5483. auto & logits_valid = lctx.logits_valid;
  5484. logits_valid.clear();
  5485. logits_valid.resize(n_tokens);
  5486. logits_out.clear();
  5487. #endif
  5488. if (batch.logits) {
  5489. logits_out.resize(n_vocab * n_tokens);
  5490. for (uint32_t i = 0; i < n_tokens; i++) {
  5491. if (batch.logits[i] == 0) {
  5492. continue;
  5493. }
  5494. ggml_backend_tensor_get(res, logits_out.data() + (n_vocab*i), (n_vocab*i)*sizeof(float), n_vocab*sizeof(float));
  5495. #ifndef NDEBUG
  5496. logits_valid[i] = true;
  5497. #endif
  5498. }
  5499. } else if (lctx.logits_all) {
  5500. logits_out.resize(n_vocab * n_tokens);
  5501. ggml_backend_tensor_get(res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float));
  5502. #ifndef NDEBUG
  5503. std::fill(logits_valid.begin(), logits_valid.end(), true);
  5504. #endif
  5505. } else {
  5506. logits_out.resize(n_vocab);
  5507. ggml_backend_tensor_get(res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float));
  5508. #ifndef NDEBUG
  5509. logits_valid[0] = true;
  5510. #endif
  5511. }
  5512. }
  5513. // extract embeddings
  5514. if (!lctx.embedding.empty()) {
  5515. auto & embedding_out = lctx.embedding;
  5516. embedding_out.resize(n_embd);
  5517. ggml_backend_tensor_get(embeddings, embedding_out.data(), (n_embd*(n_tokens - 1))*sizeof(float), n_embd*sizeof(float));
  5518. }
  5519. // measure the performance only for the single-token evals
  5520. if (n_tokens == 1) {
  5521. lctx.t_eval_us += ggml_time_us() - t_start_us;
  5522. lctx.n_eval++;
  5523. }
  5524. else if (n_tokens > 1) {
  5525. lctx.t_p_eval_us += ggml_time_us() - t_start_us;
  5526. lctx.n_p_eval += n_tokens;
  5527. }
  5528. // get a more accurate load time, upon first eval
  5529. // TODO: fix this
  5530. if (!lctx.has_evaluated_once) {
  5531. lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
  5532. lctx.has_evaluated_once = true;
  5533. }
  5534. return 0;
  5535. }
  5536. //
  5537. // tokenizer
  5538. //
  5539. static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
  5540. return vocab.type;
  5541. }
  5542. static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
  5543. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL;
  5544. }
  5545. static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
  5546. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNKNOWN;
  5547. }
  5548. static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
  5549. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
  5550. }
  5551. static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
  5552. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
  5553. }
  5554. static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id) {
  5555. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
  5556. }
  5557. static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
  5558. GGML_ASSERT(llama_is_byte_token(vocab, id));
  5559. const auto& token_data = vocab.id_to_token.at(id);
  5560. switch (llama_vocab_get_type(vocab)) {
  5561. case LLAMA_VOCAB_TYPE_SPM: {
  5562. auto buf = token_data.text.substr(3, 2);
  5563. return strtol(buf.c_str(), NULL, 16);
  5564. }
  5565. case LLAMA_VOCAB_TYPE_BPE: {
  5566. GGML_ASSERT(false);
  5567. return unicode_to_bytes_bpe(token_data.text);
  5568. }
  5569. default:
  5570. GGML_ASSERT(false);
  5571. }
  5572. }
  5573. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
  5574. static const char * hex = "0123456789ABCDEF";
  5575. switch (llama_vocab_get_type(vocab)) {
  5576. case LLAMA_VOCAB_TYPE_SPM: {
  5577. const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
  5578. return vocab.token_to_id.at(buf);
  5579. }
  5580. case LLAMA_VOCAB_TYPE_BPE: {
  5581. return vocab.token_to_id.at(bytes_to_unicode_bpe(ch));
  5582. }
  5583. default:
  5584. GGML_ASSERT(false);
  5585. }
  5586. }
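// Illustrative round-trip (not part of the library): SPM vocabs store raw bytes as tokens whose
// text is "<0xNN>", which is what the two helpers above format and parse. Assuming a loaded SPM
// `vocab`:
//
//   const uint8_t ch = 'A';                                 // 0x41
//   const llama_token id = llama_byte_to_token(vocab, ch);  // id of the token with text "<0x41>"
//   GGML_ASSERT(llama_token_to_byte(vocab, id) == ch);      // recovers the original byte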
  5587. static void llama_escape_whitespace(std::string & text) {
  5588. replace_all(text, " ", "\xe2\x96\x81");
  5589. }
  5590. static void llama_unescape_whitespace(std::string & word) {
  5591. replace_all(word, "\xe2\x96\x81", " ");
  5592. }
  5593. struct llm_symbol {
  5594. using index = int;
  5595. index prev;
  5596. index next;
  5597. const char * text;
  5598. size_t n;
  5599. };
  5600. static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
  5601. // SPM tokenizer
  5602. // original implementation:
  5603. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  5604. struct llm_bigram_spm {
  5605. struct comparator {
  5606. bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
  5607. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  5608. }
  5609. };
  5610. using queue_storage = std::vector<llm_bigram_spm>;
  5611. using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
  5612. llm_symbol::index left;
  5613. llm_symbol::index right;
  5614. float score;
  5615. size_t size;
  5616. };
  5617. struct llm_tokenizer_spm {
  5618. llm_tokenizer_spm(const llama_vocab & vocab): vocab(vocab) {}
  5619. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  5620. // split string into utf8 chars
  5621. int index = 0;
  5622. size_t offs = 0;
  5623. while (offs < text.size()) {
  5624. llm_symbol sym;
  5625. size_t len = utf8_len(text[offs]);
  5626. sym.text = text.c_str() + offs;
  5627. sym.n = std::min(len, text.size() - offs);
  5628. offs += sym.n;
  5629. sym.prev = index - 1;
  5630. sym.next = offs == text.size() ? -1 : index + 1;
  5631. index++;
  5632. symbols.emplace_back(sym);
  5633. }
  5634. // seed the work queue with all possible 2-character tokens.
  5635. for (size_t i = 1; i < symbols.size(); ++i) {
  5636. try_add_bigram(i - 1, i);
  5637. }
5638. // keep substituting the highest-scoring pairs for as long as we can.
  5639. while (!work_queue.empty()) {
  5640. auto bigram = work_queue.top();
  5641. work_queue.pop();
  5642. auto & left_sym = symbols[bigram.left];
  5643. auto & right_sym = symbols[bigram.right];
  5644. // if one of the symbols already got merged, skip it.
  5645. if (left_sym.n == 0 || right_sym.n == 0 ||
  5646. left_sym.n + right_sym.n != bigram.size) {
  5647. continue;
  5648. }
  5649. // merge the right sym into the left one
  5650. left_sym.n += right_sym.n;
  5651. right_sym.n = 0;
  5652. //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  5653. // remove the right sym from the chain
  5654. left_sym.next = right_sym.next;
  5655. if (right_sym.next >= 0) {
  5656. symbols[right_sym.next].prev = bigram.left;
  5657. }
  5658. // find more substitutions
  5659. try_add_bigram(left_sym.prev, bigram.left);
  5660. try_add_bigram(bigram.left, left_sym.next);
  5661. }
  5662. for (int i = 0; i != -1; i = symbols[i].next) {
  5663. auto & symbol = symbols[i];
  5664. resegment(symbol, output);
  5665. }
  5666. }
  5667. private:
  5668. void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
  5669. auto text = std::string(symbol.text, symbol.n);
  5670. auto token = vocab.token_to_id.find(text);
  5671. // Do we need to support is_unused?
  5672. if (token != vocab.token_to_id.end()) {
  5673. output.push_back((*token).second);
  5674. return;
  5675. }
  5676. const auto p = rev_merge.find(text);
  5677. if (p == rev_merge.end()) {
  5678. // output any symbols that did not form tokens as bytes.
  5679. for (int j = 0; j < (int)symbol.n; ++j) {
  5680. llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
  5681. output.push_back(token_id);
  5682. }
  5683. return;
  5684. }
  5685. resegment(symbols[p->second.first], output);
  5686. resegment(symbols[p->second.second], output);
  5687. }
  5688. void try_add_bigram(int left, int right) {
  5689. if (left == -1 || right == -1) {
  5690. return;
  5691. }
  5692. const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
  5693. auto token = vocab.token_to_id.find(text);
  5694. if (token == vocab.token_to_id.end()) {
  5695. return;
  5696. }
  5697. if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
  5698. return;
  5699. }
  5700. const auto & tok_data = vocab.id_to_token[(*token).second];
  5701. llm_bigram_spm bigram;
  5702. bigram.left = left;
  5703. bigram.right = right;
  5704. bigram.score = tok_data.score;
  5705. bigram.size = text.size();
  5706. work_queue.push(bigram);
  5707. // Do we need to support is_unused?
  5708. rev_merge[text] = std::make_pair(left, right);
  5709. }
  5710. const llama_vocab & vocab;
  5711. std::vector<llm_symbol> symbols;
  5712. llm_bigram_spm::queue work_queue;
  5713. std::map<std::string, std::pair<int, int>> rev_merge;
  5714. };
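// Usage sketch (illustrative only): the SPM tokenizer is normally driven by
// llama_tokenize_internal below; used in isolation it would look like this, where `vocab` is a
// loaded SPM vocabulary:
//
//   std::vector<llama_vocab::id> ids;
//   std::string text = " hello world";
//   llama_escape_whitespace(text);       // " " -> U+2581, matching how the model was trained
//   llm_tokenizer_spm tokenizer(vocab);
//   tokenizer.tokenize(text, ids);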
  5715. // BPE tokenizer
  5716. // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
  5717. // tried to simplify unicode stuff, so most likely does not work 100% correctly!
  5718. // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
  5719. struct llm_bigram_bpe {
  5720. struct comparator {
  5721. bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
  5722. return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
  5723. }
  5724. };
  5725. using queue_storage = std::vector<llm_bigram_bpe>;
  5726. using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
  5727. llm_symbol::index left;
  5728. llm_symbol::index right;
  5729. std::string text;
  5730. int rank;
  5731. size_t size;
  5732. };
  5733. struct llm_tokenizer_bpe {
  5734. llm_tokenizer_bpe(const llama_vocab & vocab): vocab(vocab) {}
  5735. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  5736. int final_prev_index = -1;
  5737. auto word_collection = bpe_gpt2_preprocess(text);
  5738. symbols_final.clear();
  5739. for (auto & word : word_collection) {
  5740. work_queue = llm_bigram_bpe::queue();
  5741. symbols.clear();
  5742. int index = 0;
  5743. size_t offset = 0;
  5744. while (offset < word.size()) {
  5745. llm_symbol sym;
  5746. size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
  5747. sym.text = word.c_str() + offset;
  5748. sym.n = char_len;
  5749. offset += sym.n;
  5750. sym.prev = index - 1;
  5751. sym.next = offset == word.size() ? -1 : index + 1;
  5752. index++;
  5753. symbols.emplace_back(sym);
  5754. }
  5755. for (size_t i = 1; i < symbols.size(); ++i) {
  5756. add_new_bigram(i - 1, i);
  5757. }
  5758. // build token(s)
  5759. while (!work_queue.empty()) {
  5760. auto bigram = work_queue.top();
  5761. work_queue.pop();
  5762. auto & left_symbol = symbols[bigram.left];
  5763. auto & right_symbol = symbols[bigram.right];
  5764. if (left_symbol.n == 0 || right_symbol.n == 0) {
  5765. continue;
  5766. }
  5767. std::string left_token = std::string(left_symbol.text, left_symbol.n);
  5768. std::string right_token = std::string(right_symbol.text, right_symbol.n);
  5769. if (left_token + right_token != bigram.text) {
  5770. continue; // Skip this bigram if it's outdated
  5771. }
  5772. // merge the right sym into the left one
  5773. left_symbol.n += right_symbol.n;
  5774. right_symbol.n = 0;
  5775. // remove the right sym from the chain
  5776. left_symbol.next = right_symbol.next;
  5777. if (right_symbol.next >= 0) {
  5778. symbols[right_symbol.next].prev = bigram.left;
  5779. }
  5780. add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
  5781. add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
  5782. }
5783. // add the finished tokens to the final list, keeping the correct order for next and prev
  5784. for (auto & sym : symbols) {
  5785. if (sym.n > 0) {
  5786. sym.prev = final_prev_index;
  5787. sym.next = -1;
  5788. if (final_prev_index != -1) {
  5789. symbols_final[final_prev_index].next = symbols_final.size();
  5790. }
  5791. symbols_final.emplace_back(sym);
  5792. final_prev_index = symbols_final.size() - 1;
  5793. }
  5794. }
  5795. }
  5796. symbols = symbols_final;
  5797. if (!symbols.empty()) {
  5798. for (int i = 0; i != -1; i = symbols[i].next) {
  5799. auto & symbol = symbols[i];
  5800. if (symbol.n == 0) {
  5801. continue;
  5802. }
  5803. const std::string str = std::string(symbol.text, symbol.n);
  5804. const auto token = vocab.token_to_id.find(str);
  5805. if (token == vocab.token_to_id.end()) {
  5806. for (auto j = str.begin(); j != str.end(); ++j) {
  5807. std::string byte_str(1, *j);
  5808. auto token_multibyte = vocab.token_to_id.find(byte_str);
  5809. if (token_multibyte == vocab.token_to_id.end()) {
  5810. throw std::runtime_error("ERROR: byte not found in vocab");
  5811. }
  5812. output.push_back((*token_multibyte).second);
  5813. }
  5814. } else {
  5815. output.push_back((*token).second);
  5816. }
  5817. }
  5818. }
  5819. }
  5820. private:
  5821. void add_new_bigram(int left, int right) {
  5822. if (left == -1 || right == -1) {
  5823. return;
  5824. }
  5825. std::string left_token = std::string(symbols[left].text, symbols[left].n);
  5826. std::string right_token = std::string(symbols[right].text, symbols[right].n);
  5827. int rank_found = -1;
  5828. rank_found = vocab.find_bpe_rank(left_token, right_token);
  5829. if (rank_found < 0) {
  5830. return;
  5831. }
  5832. llm_bigram_bpe bigram;
  5833. bigram.left = left;
  5834. bigram.right = right;
  5835. bigram.text = left_token + right_token;
  5836. bigram.size = left_token.size() + right_token.size();
  5837. bigram.rank = rank_found;
  5838. work_queue.push(bigram);
  5839. }
  5840. std::vector<std::string> bpe_gpt2_preprocess(const std::string & text) {
  5841. std::vector<std::string> bpe_words;
  5842. std::vector<std::string> bpe_encoded_words;
  5843. std::string token = "";
  5844. // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+
  5845. bool collecting_numeric = false;
  5846. bool collecting_letter = false;
  5847. bool collecting_special = false;
  5848. bool collecting_whitespace_lookahead = false;
  5849. bool collecting = false;
  5850. std::vector<std::string> text_utf;
  5851. text_utf.reserve(text.size());
  5852. bpe_words.reserve(text.size());
  5853. bpe_encoded_words.reserve(text.size());
  5854. auto cps = codepoints_from_utf8(text);
  5855. for (size_t i = 0; i < cps.size(); ++i)
  5856. text_utf.emplace_back(codepoint_to_utf8(cps[i]));
  5857. for (int i = 0; i < (int)text_utf.size(); i++) {
  5858. const std::string & utf_char = text_utf[i];
  5859. bool split_condition = false;
  5860. int bytes_remain = text_utf.size() - i;
  5861. // forward backward lookups
  5862. const std::string & utf_char_next = (i + 1 < (int)text_utf.size()) ? text_utf[i + 1] : "";
  5863. const std::string & utf_char_next_next = (i + 2 < (int)text_utf.size()) ? text_utf[i + 2] : "";
  5864. // handling contractions
  5865. if (!split_condition && bytes_remain >= 2) {
  5866. // 's|'t|'m|'d
  5867. if (utf_char == "\'" && (utf_char_next == "s" || utf_char_next == "t" || utf_char_next == "m" || utf_char_next == "d")) {
  5868. split_condition = true;
  5869. }
  5870. if (split_condition) {
  5871. if (token.size()) {
  5872. bpe_words.emplace_back(token); // push previous content as token
  5873. }
  5874. token = utf_char + utf_char_next;
  5875. bpe_words.emplace_back(token);
  5876. token = "";
  5877. i++;
  5878. continue;
  5879. }
  5880. }
  5881. if (!split_condition && bytes_remain >= 3) {
  5882. // 're|'ve|'ll
  5883. if (utf_char == "\'" && (
  5884. (utf_char_next == "r" && utf_char_next_next == "e") ||
  5885. (utf_char_next == "v" && utf_char_next_next == "e") ||
  5886. (utf_char_next == "l" && utf_char_next_next == "l"))
  5887. ) {
  5888. split_condition = true;
  5889. }
  5890. if (split_condition) {
  5891. // current token + next token can be defined
  5892. if (token.size()) {
  5893. bpe_words.emplace_back(token); // push previous content as token
  5894. }
  5895. token = utf_char + utf_char_next + utf_char_next_next;
  5896. bpe_words.emplace_back(token); // the contraction
  5897. token = "";
  5898. i += 2;
  5899. continue;
  5900. }
  5901. }
  5902. if (!split_condition && !collecting) {
  5903. if (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER)) {
  5904. collecting_letter = true;
  5905. collecting = true;
  5906. }
  5907. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  5908. collecting_numeric = true;
  5909. collecting = true;
  5910. }
  5911. else if (
  5912. ((codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) && (codepoint_type(utf_char) != CODEPOINT_TYPE_WHITESPACE)) ||
  5913. (!token.size() && utf_char == " " && codepoint_type(utf_char_next) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char_next) != CODEPOINT_TYPE_DIGIT && codepoint_type(utf_char_next) != CODEPOINT_TYPE_WHITESPACE)
  5914. ) {
  5915. collecting_special = true;
  5916. collecting = true;
  5917. }
  5918. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE && codepoint_type(utf_char_next) == CODEPOINT_TYPE_WHITESPACE) {
  5919. collecting_whitespace_lookahead = true;
  5920. collecting = true;
  5921. }
  5922. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE) {
  5923. split_condition = true;
  5924. }
  5925. }
  5926. else if (!split_condition && collecting) {
  5927. if (collecting_letter && codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER) {
  5928. split_condition = true;
  5929. }
  5930. else if (collecting_numeric && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) {
  5931. split_condition = true;
  5932. }
  5933. else if (collecting_special && (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE)) {
  5934. split_condition = true;
  5935. }
  5936. else if (collecting_whitespace_lookahead && (codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  5937. split_condition = true;
  5938. }
  5939. }
  5940. if (utf_char_next == "") {
  5941. split_condition = true; // final
  5942. token += utf_char;
  5943. }
  5944. if (split_condition) {
  5945. if (token.size()) {
  5946. bpe_words.emplace_back(token);
  5947. }
  5948. token = utf_char;
  5949. collecting = false;
  5950. collecting_letter = false;
  5951. collecting_numeric = false;
  5952. collecting_special = false;
  5953. collecting_whitespace_lookahead = false;
  5954. }
  5955. else {
  5956. token += utf_char;
  5957. }
  5958. }
  5959. for (std::string & word : bpe_words) {
  5960. std::string encoded_token = "";
  5961. for (char & c : word) {
  5962. encoded_token += bytes_to_unicode_bpe(c);
  5963. }
  5964. bpe_encoded_words.emplace_back(encoded_token);
  5965. }
  5966. return bpe_encoded_words;
  5967. }
  5968. const llama_vocab & vocab;
  5969. std::vector<llm_symbol> symbols;
  5970. std::vector<llm_symbol> symbols_final;
  5971. llm_bigram_bpe::queue work_queue;
  5972. };
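// Usage sketch (illustrative only): the BPE path does not escape whitespace; bpe_gpt2_preprocess
// splits the text into words, and each word is then merged greedily by rank. With a loaded BPE
// `vocab`:
//
//   std::vector<llama_vocab::id> ids;
//   llm_tokenizer_bpe tokenizer(vocab);
//   tokenizer.tokenize("hello world", ids);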
  5973. typedef enum FRAGMENT_BUFFER_VARIANT_TYPE{
  5974. FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
  5975. FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
  5976. } FRAGMENT_BUFFER_VARIANT_TYPE;
  5977. struct fragment_buffer_variant{
  5978. fragment_buffer_variant(llama_vocab::id _token)
  5979. :
  5980. type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
  5981. token(_token),
  5982. raw_text(_dummy),
  5983. offset(0),
  5984. length(0){}
  5985. fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
  5986. :
  5987. type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
  5988. token((llama_vocab::id)-1),
  5989. raw_text(_raw_text),
  5990. offset(_offset),
  5991. length(_length){
  5992. GGML_ASSERT( _offset >= 0 );
  5993. GGML_ASSERT( _length >= 1 );
  5994. GGML_ASSERT( offset + length <= raw_text.length() );
  5995. }
  5996. const FRAGMENT_BUFFER_VARIANT_TYPE type;
  5997. const llama_vocab::id token;
  5998. const std::string _dummy;
  5999. const std::string & raw_text;
  6000. const uint64_t offset;
  6001. const uint64_t length;
  6002. };
  6003. // #define PRETOKENIZERDEBUG
  6004. static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer)
  6005. {
  6006. // for each special token
  6007. for (const auto & st: vocab.special_tokens_cache) {
  6008. const auto & special_token = st.first;
  6009. const auto & special_id = st.second;
  6010. // for each text fragment
  6011. std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
  6012. while (it != buffer.end()) {
  6013. auto & fragment = (*it);
  6014. // if a fragment is text ( not yet processed )
  6015. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  6016. auto * raw_text = &(fragment.raw_text);
  6017. auto raw_text_base_offset = fragment.offset;
  6018. auto raw_text_base_length = fragment.length;
  6019. // loop over the text
  6020. while (true) {
  6021. // find the first occurrence of a given special token in this fragment
6022. // passing the offset argument only limits the "search area", but match coordinates
6023. // are still relative to the full source raw_text
  6024. auto match = raw_text->find(special_token, raw_text_base_offset);
  6025. // no occurrences found, stop processing this fragment for a given special token
  6026. if (match == std::string::npos) break;
  6027. // check if match is within bounds of offset <-> length
  6028. if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
  6029. #ifdef PRETOKENIZERDEBUG
  6030. fprintf(stderr, "FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  6031. #endif
  6032. auto source = std::distance(buffer.begin(), it);
  6033. // if match is further than base offset
  6034. // then we have some text to the left of it
  6035. if (match > raw_text_base_offset) {
  6036. // left
  6037. const int64_t left_reminder_offset = raw_text_base_offset + 0;
  6038. const int64_t left_reminder_length = match - raw_text_base_offset;
  6039. buffer.emplace_after(it, (*raw_text), left_reminder_offset, left_reminder_length);
  6040. #ifdef PRETOKENIZERDEBUG
  6041. fprintf(stderr, "FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
  6042. #endif
  6043. it++;
  6044. }
  6045. // special token
  6046. buffer.emplace_after(it, special_id);
  6047. it++;
  6048. // right
  6049. if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
  6050. const int64_t right_reminder_offset = match + special_token.length();
  6051. const int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
  6052. buffer.emplace_after(it, (*raw_text), right_reminder_offset, right_reminder_length);
  6053. #ifdef PRETOKENIZERDEBUG
  6054. fprintf(stderr, "FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
  6055. #endif
  6056. it++;
  6057. if (source == 0) {
  6058. buffer.erase_after(buffer.before_begin());
  6059. } else {
  6060. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  6061. }
  6062. // repeat for the right side
  6063. raw_text_base_offset = right_reminder_offset;
  6064. raw_text_base_length = right_reminder_length;
  6065. #ifdef PRETOKENIZERDEBUG
  6066. fprintf(stderr, "RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  6067. #endif
  6068. } else {
  6069. if (source == 0) {
  6070. buffer.erase_after(buffer.before_begin());
  6071. } else {
  6072. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  6073. }
  6074. break;
  6075. }
  6076. }
  6077. }
  6078. it++;
  6079. }
  6080. }
  6081. }
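// Illustrative example (not executed anywhere): suppose "<s>" is in vocab.special_tokens_cache
// and the buffer starts as a single raw-text fragment over "a<s>b". One pass of the loop above
// turns
//
//   [ RAW_TEXT("a<s>b", offset=0, len=5) ]
//
// into
//
//   [ RAW_TEXT("a", offset=0, len=1), TOKEN(<s>), RAW_TEXT("b", offset=4, len=1) ]
//
// Raw-text fragments keep referencing the original string via (offset, length); the actual
// substr() copies happen later, per fragment, in llama_tokenize_internal.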
  6082. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special) {
  6083. std::vector<llama_vocab::id> output;
  6084. // OG tokenizer behavior:
  6085. //
  6086. // tokenizer.encode('', add_bos=True) returns [1]
  6087. // tokenizer.encode('', add_bos=False) returns []
  6088. if (bos && vocab.special_bos_id != -1) {
  6089. output.push_back(vocab.special_bos_id);
  6090. }
  6091. if (raw_text.empty()) {
  6092. return output;
  6093. }
  6094. std::forward_list<fragment_buffer_variant> fragment_buffer;
  6095. fragment_buffer.emplace_front( raw_text, 0, raw_text.length() );
  6096. if (special) tokenizer_st_partition( vocab, fragment_buffer );
  6097. switch (vocab.type) {
  6098. case LLAMA_VOCAB_TYPE_SPM:
  6099. {
  6100. for (const auto & fragment: fragment_buffer)
  6101. {
  6102. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT)
  6103. {
  6104. // without adding this leading whitespace, we do not get the same results as the original tokenizer
  6105. // TODO: It's likely possible to get rid of this string copy entirely
  6106. // by modifying llm_tokenizer_x to operate with string offsets like pre-tokenizer
  6107. // and passing 'add space prefix' as bool argument
  6108. //
  6109. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  6110. if (&fragment == &fragment_buffer.front()) {
  6111. raw_text = " " + raw_text; // prefix with space if the first token is not special
  6112. }
  6113. #ifdef PRETOKENIZERDEBUG
  6114. fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  6115. #endif
  6116. llm_tokenizer_spm tokenizer(vocab);
  6117. llama_escape_whitespace(raw_text);
  6118. tokenizer.tokenize(raw_text, output);
  6119. }
  6120. else // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  6121. {
  6122. output.push_back(fragment.token);
  6123. }
  6124. }
  6125. } break;
  6126. case LLAMA_VOCAB_TYPE_BPE:
  6127. {
  6128. for (const auto & fragment: fragment_buffer)
  6129. {
  6130. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT)
  6131. {
  6132. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  6133. #ifdef PRETOKENIZERDEBUG
  6134. fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  6135. #endif
  6136. llm_tokenizer_bpe tokenizer(vocab);
  6137. tokenizer.tokenize(raw_text, output);
  6138. }
  6139. else // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  6140. {
  6141. output.push_back(fragment.token);
  6142. }
  6143. }
  6144. } break;
  6145. }
  6146. return output;
  6147. }
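// Usage sketch (illustrative, `vocab` taken from a loaded model): tokenize a prompt with a BOS
// token and with special-token parsing enabled:
//
//   std::vector<llama_vocab::id> ids =
//       llama_tokenize_internal(vocab, "Hello world", /*bos=*/true, /*special=*/true);
//   // ids.front() == vocab.special_bos_id when the vocab defines one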
  6148. //
  6149. // grammar - internal
  6150. //
  6151. struct llama_partial_utf8 {
  6152. uint32_t value; // bit value so far (unshifted)
  6153. int n_remain; // num bytes remaining; -1 indicates invalid sequence
  6154. };
  6155. struct llama_grammar {
  6156. const std::vector<std::vector<llama_grammar_element>> rules;
  6157. std::vector<std::vector<const llama_grammar_element *>> stacks;
  6158. // buffer for partially generated UTF-8 sequence from accepted tokens
  6159. llama_partial_utf8 partial_utf8;
  6160. };
  6161. struct llama_grammar_candidate {
  6162. size_t index;
  6163. const uint32_t * code_points;
  6164. llama_partial_utf8 partial_utf8;
  6165. };
6166. // Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 so the result
6167. // can be iterated via a raw pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
  6168. static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
  6169. const std::string & src,
  6170. llama_partial_utf8 partial_start) {
  6171. static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
  6172. const char * pos = src.c_str();
  6173. std::vector<uint32_t> code_points;
6174. // common English strings have the same number of codepoints and bytes. `+ 1` for the terminating 0.
  6175. code_points.reserve(src.size() + 1);
  6176. uint32_t value = partial_start.value;
  6177. int n_remain = partial_start.n_remain;
  6178. // continue previous decode, if applicable
  6179. while (*pos != 0 && n_remain > 0) {
  6180. uint8_t next_byte = static_cast<uint8_t>(*pos);
  6181. if ((next_byte >> 6) != 2) {
  6182. // invalid sequence, abort
  6183. code_points.push_back(0);
  6184. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
  6185. }
  6186. value = (value << 6) + (next_byte & 0x3F);
  6187. ++pos;
  6188. --n_remain;
  6189. }
  6190. if (partial_start.n_remain > 0 && n_remain == 0) {
  6191. code_points.push_back(value);
  6192. }
  6193. // decode any subsequent utf-8 sequences, which may end in an incomplete one
  6194. while (*pos != 0) {
  6195. uint8_t first_byte = static_cast<uint8_t>(*pos);
  6196. uint8_t highbits = first_byte >> 4;
  6197. n_remain = lookup[highbits] - 1;
  6198. if (n_remain < 0) {
  6199. // invalid sequence, abort
  6200. code_points.clear();
  6201. code_points.push_back(0);
  6202. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
  6203. }
  6204. uint8_t mask = (1 << (7 - n_remain)) - 1;
  6205. value = first_byte & mask;
  6206. ++pos;
  6207. while (*pos != 0 && n_remain > 0) {
  6208. value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
  6209. ++pos;
  6210. --n_remain;
  6211. }
  6212. if (n_remain == 0) {
  6213. code_points.push_back(value);
  6214. }
  6215. }
  6216. code_points.push_back(0);
  6217. return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
  6218. }
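// Illustrative example: decoding the 3-byte sequence E2 96 81 (U+2581) split across two calls,
// as happens when a token boundary falls inside a UTF-8 character:
//
//   auto r1 = decode_utf8("\xE2\x96", {0, 0});  // r1.first == {0},         r1.second.n_remain == 1
//   auto r2 = decode_utf8("\x81", r1.second);   // r2.first == {0x2581, 0}, r2.second.n_remain == 0
//
// The trailing 0 in each code-point vector is the terminator mentioned in the comment above.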
  6219. // returns true iff pos points to the end of one of the definitions of a rule
  6220. static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
  6221. switch (pos->type) {
  6222. case LLAMA_GRETYPE_END: return true; // NOLINT
  6223. case LLAMA_GRETYPE_ALT: return true; // NOLINT
  6224. default: return false;
  6225. }
  6226. }
  6227. // returns true iff chr satisfies the char range at pos (regular or inverse range)
  6228. // asserts that pos is pointing to a char range element
  6229. static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
  6230. const llama_grammar_element * pos,
  6231. const uint32_t chr) {
  6232. bool found = false;
  6233. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  6234. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT
  6235. do {
  6236. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  6237. // inclusive range, e.g. [a-z]
  6238. found = found || (pos->value <= chr && chr <= pos[1].value);
  6239. pos += 2;
  6240. } else {
  6241. // exact char match, e.g. [a] or "a"
  6242. found = found || pos->value == chr;
  6243. pos += 1;
  6244. }
  6245. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  6246. return std::make_pair(found == is_positive_char, pos);
  6247. }
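// Illustrative example (assuming the alternatives end right after): for a rule fragment encoding
// the character class [a-z0],
//
//   { {LLAMA_GRETYPE_CHAR, 'a'}, {LLAMA_GRETYPE_CHAR_RNG_UPPER, 'z'}, {LLAMA_GRETYPE_CHAR_ALT, '0'}, {LLAMA_GRETYPE_END, 0} }
//
// llama_grammar_match_char(pos, 'q') returns {true, pos+3} and llama_grammar_match_char(pos, 'A')
// returns {false, pos+3}; the returned pointer always points just past the last alternative.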
  6248. // returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
  6249. // range at pos (regular or inverse range)
  6250. // asserts that pos is pointing to a char range element
  6251. static bool llama_grammar_match_partial_char(
  6252. const llama_grammar_element * pos,
  6253. const llama_partial_utf8 partial_utf8) {
  6254. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  6255. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  6256. uint32_t partial_value = partial_utf8.value;
  6257. int n_remain = partial_utf8.n_remain;
  6258. // invalid sequence or 7-bit char split across 2 bytes (overlong)
  6259. if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
  6260. return false;
  6261. }
  6262. // range of possible code points this partial UTF-8 sequence could complete to
  6263. uint32_t low = partial_value << (n_remain * 6);
  6264. uint32_t high = low | ((1 << (n_remain * 6)) - 1);
  6265. if (low == 0) {
  6266. if (n_remain == 2) {
  6267. low = 1 << 11;
  6268. } else if (n_remain == 3) {
  6269. low = 1 << 16;
  6270. }
  6271. }
  6272. do {
  6273. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  6274. // inclusive range, e.g. [a-z]
  6275. if (pos->value <= high && low <= pos[1].value) {
  6276. return is_positive_char;
  6277. }
  6278. pos += 2;
  6279. } else {
  6280. // exact char match, e.g. [a] or "a"
  6281. if (low <= pos->value && pos->value <= high) {
  6282. return is_positive_char;
  6283. }
  6284. pos += 1;
  6285. }
  6286. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  6287. return !is_positive_char;
  6288. }
  6289. // transforms a grammar pushdown stack into N possible stacks, all ending
  6290. // at a character range (terminal element)
  6291. static void llama_grammar_advance_stack(
  6292. const std::vector<std::vector<llama_grammar_element>> & rules,
  6293. const std::vector<const llama_grammar_element *> & stack,
  6294. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  6295. if (stack.empty()) {
  6296. new_stacks.emplace_back(stack);
  6297. return;
  6298. }
  6299. const llama_grammar_element * pos = stack.back();
  6300. switch (pos->type) {
  6301. case LLAMA_GRETYPE_RULE_REF: {
  6302. const size_t rule_id = static_cast<size_t>(pos->value);
  6303. const llama_grammar_element * subpos = rules[rule_id].data();
  6304. do {
  6305. // init new stack without the top (pos)
  6306. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  6307. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  6308. // if this rule ref is followed by another element, add that to stack
  6309. new_stack.push_back(pos + 1);
  6310. }
  6311. if (!llama_grammar_is_end_of_sequence(subpos)) {
  6312. // if alternate is nonempty, add to stack
  6313. new_stack.push_back(subpos);
  6314. }
  6315. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  6316. while (!llama_grammar_is_end_of_sequence(subpos)) {
  6317. // scan to end of alternate def
  6318. subpos++;
  6319. }
  6320. if (subpos->type == LLAMA_GRETYPE_ALT) {
  6321. // there's another alternate def of this rule to process
  6322. subpos++;
  6323. } else {
  6324. break;
  6325. }
  6326. } while (true);
  6327. break;
  6328. }
  6329. case LLAMA_GRETYPE_CHAR:
  6330. case LLAMA_GRETYPE_CHAR_NOT:
  6331. new_stacks.emplace_back(stack);
  6332. break;
  6333. default:
  6334. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  6335. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  6336. // those
  6337. GGML_ASSERT(false);
  6338. }
  6339. }
  6340. // takes a set of possible pushdown stacks on a grammar, which are required to
  6341. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  6342. // produces the N possible stacks if the given char is accepted at those
  6343. // positions
  6344. static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
  6345. const std::vector<std::vector<llama_grammar_element>> & rules,
  6346. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  6347. const uint32_t chr) {
  6348. std::vector<std::vector<const llama_grammar_element *>> new_stacks;
  6349. for (const auto & stack : stacks) {
  6350. if (stack.empty()) {
  6351. continue;
  6352. }
  6353. auto match = llama_grammar_match_char(stack.back(), chr);
  6354. if (match.first) {
  6355. const llama_grammar_element * pos = match.second;
  6356. // update top of stack to next element, if any
  6357. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  6358. if (!llama_grammar_is_end_of_sequence(pos)) {
  6359. new_stack.push_back(pos);
  6360. }
  6361. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  6362. }
  6363. }
  6364. return new_stacks;
  6365. }
  6366. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  6367. const std::vector<std::vector<llama_grammar_element>> & rules,
  6368. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  6369. const std::vector<llama_grammar_candidate> & candidates);
  6370. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  6371. const std::vector<std::vector<llama_grammar_element>> & rules,
  6372. const std::vector<const llama_grammar_element *> & stack,
  6373. const std::vector<llama_grammar_candidate> & candidates) {
  6374. std::vector<llama_grammar_candidate> rejects;
  6375. if (stack.empty()) {
  6376. for (const auto & tok : candidates) {
  6377. if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
  6378. rejects.push_back(tok);
  6379. }
  6380. }
  6381. return rejects;
  6382. }
  6383. const llama_grammar_element * stack_pos = stack.back();
  6384. std::vector<llama_grammar_candidate> next_candidates;
  6385. for (const auto & tok : candidates) {
  6386. if (*tok.code_points == 0) {
  6387. // reached end of full codepoints in token, reject iff it ended in a partial sequence
  6388. // that cannot satisfy this position in grammar
  6389. if (tok.partial_utf8.n_remain != 0 &&
  6390. !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
  6391. rejects.push_back(tok);
  6392. }
  6393. } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
  6394. next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
  6395. } else {
  6396. rejects.push_back(tok);
  6397. }
  6398. }
  6399. const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  6400. // update top of stack to next element, if any
  6401. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  6402. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  6403. stack_after.push_back(stack_pos_after);
  6404. }
  6405. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  6406. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  6407. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  6408. for (const auto & tok : next_rejects) {
  6409. rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
  6410. }
  6411. return rejects;
  6412. }
  6413. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  6414. const std::vector<std::vector<llama_grammar_element>> & rules,
  6415. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  6416. const std::vector<llama_grammar_candidate> & candidates) {
  6417. GGML_ASSERT(!stacks.empty()); // REVIEW
  6418. if (candidates.empty()) {
  6419. return std::vector<llama_grammar_candidate>();
  6420. }
  6421. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  6422. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  6423. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  6424. }
  6425. return rejects;
  6426. }
  6427. //
  6428. // grammar - external
  6429. //
  6430. struct llama_grammar * llama_grammar_init(
  6431. const llama_grammar_element ** rules,
  6432. size_t n_rules,
  6433. size_t start_rule_index) {
  6434. const llama_grammar_element * pos;
  6435. // copy rule definitions into vectors
  6436. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  6437. for (size_t i = 0; i < n_rules; i++) {
  6438. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  6439. vec_rules[i].push_back(*pos);
  6440. }
  6441. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  6442. }
  6443. // loop over alternates of start rule to build initial stacks
  6444. std::vector<std::vector<const llama_grammar_element *>> stacks;
  6445. pos = rules[start_rule_index];
  6446. do {
  6447. std::vector<const llama_grammar_element *> stack;
  6448. if (!llama_grammar_is_end_of_sequence(pos)) {
  6449. // if alternate is nonempty, add to stack
  6450. stack.push_back(pos);
  6451. }
  6452. llama_grammar_advance_stack(vec_rules, stack, stacks);
  6453. while (!llama_grammar_is_end_of_sequence(pos)) {
  6454. // scan to end of alternate def
  6455. pos++;
  6456. }
  6457. if (pos->type == LLAMA_GRETYPE_ALT) {
  6458. // there's another alternate def of this rule to process
  6459. pos++;
  6460. } else {
  6461. break;
  6462. }
  6463. } while (true);
  6464. return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
  6465. }
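// Usage sketch (illustrative): `rule_ptrs` below stands for a vector of pointers to
// LLAMA_GRETYPE_END-terminated rule definitions, typically produced by the GBNF grammar parser
// shipped with the examples; `start` is the index of the root rule:
//
//   struct llama_grammar * grammar = llama_grammar_init(rule_ptrs.data(), rule_ptrs.size(), start);
//   ...   // constrain sampling via llama_sample_grammar / llama_grammar_accept_token below
//   llama_grammar_free(grammar);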
  6466. void llama_grammar_free(struct llama_grammar * grammar) {
  6467. delete grammar;
  6468. }
  6469. struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
  6470. llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };
  6471. // redirect elements in stacks to point to new rules
  6472. for (size_t is = 0; is < result->stacks.size(); is++) {
  6473. for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
  6474. for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
  6475. for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
  6476. if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
  6477. result->stacks[is][ie] = &result->rules[ir0][ir1];
  6478. }
  6479. }
  6480. }
  6481. }
  6482. }
  6483. return result;
  6484. }
  6485. //
  6486. // sampling
  6487. //
  6488. void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
  6489. if (seed == LLAMA_DEFAULT_SEED) {
  6490. seed = time(NULL);
  6491. }
  6492. ctx->rng.seed(seed);
  6493. }
  6494. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  6495. GGML_ASSERT(candidates->size > 0);
  6496. const int64_t t_start_sample_us = ggml_time_us();
  6497. // Sort the logits in descending order
  6498. if (!candidates->sorted) {
  6499. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  6500. return a.logit > b.logit;
  6501. });
  6502. candidates->sorted = true;
  6503. }
  6504. float max_l = candidates->data[0].logit;
  6505. float cum_sum = 0.0f;
  6506. for (size_t i = 0; i < candidates->size; ++i) {
  6507. float p = expf(candidates->data[i].logit - max_l);
  6508. candidates->data[i].p = p;
  6509. cum_sum += p;
  6510. }
  6511. for (size_t i = 0; i < candidates->size; ++i) {
  6512. candidates->data[i].p /= cum_sum;
  6513. }
  6514. if (ctx) {
  6515. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6516. }
  6517. }
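// In other words, after this call the candidates are sorted by logit and carry
//
//   p_i = exp(logit_i - max_logit) / sum_j exp(logit_j - max_logit)
//
// i.e. a numerically stable softmax (subtracting max_logit avoids overflow in expf).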
  6518. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep) {
  6519. const int64_t t_start_sample_us = ggml_time_us();
  6520. k = std::max(k, (int) min_keep);
  6521. k = std::min(k, (int) candidates->size);
  6522. // Sort scores in descending order
  6523. if (!candidates->sorted) {
  6524. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  6525. return a.logit > b.logit;
  6526. };
  6527. if (k == (int) candidates->size) {
  6528. std::sort(candidates->data, candidates->data + candidates->size, comp);
  6529. } else {
  6530. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  6531. }
  6532. candidates->sorted = true;
  6533. }
  6534. candidates->size = k;
  6535. if (ctx) {
  6536. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6537. }
  6538. }
  6539. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  6540. if (p >= 1.0f) {
  6541. return;
  6542. }
  6543. llama_sample_softmax(ctx, candidates);
  6544. const int64_t t_start_sample_us = ggml_time_us();
  6545. // Compute the cumulative probabilities
  6546. float cum_sum = 0.0f;
  6547. size_t last_idx = candidates->size;
  6548. for (size_t i = 0; i < candidates->size; ++i) {
  6549. cum_sum += candidates->data[i].p;
  6550. // Check if the running sum is at least p or if we have kept at least min_keep tokens
6551. // we set the last index to i+1 so that the current token is included in the kept set
  6552. if (cum_sum >= p && i + 1 >= min_keep) {
  6553. last_idx = i + 1;
  6554. break;
  6555. }
  6556. }
  6557. // Resize the output vector to keep only the top-p tokens
  6558. candidates->size = last_idx;
  6559. if (ctx) {
  6560. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6561. }
  6562. }
  6563. void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  6564. if (p <= 0.0f || !candidates->size) {
  6565. return;
  6566. }
  6567. llama_sample_softmax(ctx, candidates);
  6568. const int64_t t_start_sample_us = ggml_time_us();
  6569. float scale = candidates->data[0].p; // scale by max prob
  6570. size_t i = 1; // first token always matches
  6571. for (; i < candidates->size; ++i) {
  6572. if (candidates->data[i].p < p * scale && i >= min_keep) {
  6573. break; // prob too small
  6574. }
  6575. }
  6576. // Resize the output vector to keep only the matching tokens
  6577. candidates->size = i;
  6578. if (ctx) {
  6579. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6580. }
  6581. }
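// Worked example (illustrative): with p = 0.1 and sorted probabilities {0.50, 0.30, 0.04, ...},
// the cutoff is 0.1 * 0.50 = 0.05, so only the first two tokens are kept (given min_keep <= 2).
// Unlike top-p, the cutoff scales with the top probability, so it adapts to how peaked the
// distribution is.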
  6582. void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
  6583. if (z >= 1.0f || candidates->size <= 2) {
  6584. return;
  6585. }
  6586. llama_sample_softmax(nullptr, candidates);
  6587. const int64_t t_start_sample_us = ggml_time_us();
  6588. // Compute the first and second derivatives
  6589. std::vector<float> first_derivatives(candidates->size - 1);
  6590. std::vector<float> second_derivatives(candidates->size - 2);
  6591. for (size_t i = 0; i < first_derivatives.size(); ++i) {
  6592. first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
  6593. }
  6594. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  6595. second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
  6596. }
  6597. // Calculate absolute value of second derivatives
  6598. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  6599. second_derivatives[i] = std::abs(second_derivatives[i]);
  6600. }
  6601. // Normalize the second derivatives
  6602. {
  6603. const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
  6604. if (second_derivatives_sum > 1e-6f) {
  6605. for (float & value : second_derivatives) {
  6606. value /= second_derivatives_sum;
  6607. }
  6608. } else {
  6609. for (float & value : second_derivatives) {
  6610. value = 1.0f / second_derivatives.size();
  6611. }
  6612. }
  6613. }
  6614. float cum_sum = 0.0f;
  6615. size_t last_idx = candidates->size;
  6616. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  6617. cum_sum += second_derivatives[i];
  6618. // Check if the running sum is greater than z or if we have kept at least min_keep tokens
  6619. if (cum_sum > z && i >= min_keep) {
  6620. last_idx = i;
  6621. break;
  6622. }
  6623. }
  6624. // Resize the output vector to keep only the tokens above the tail location
  6625. candidates->size = last_idx;
  6626. if (ctx) {
  6627. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6628. }
  6629. }
  6630. void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  6631. // Reference implementation:
  6632. // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
  6633. if (p >= 1.0f) {
  6634. return;
  6635. }
  6636. // Compute the softmax of logits and calculate entropy
  6637. llama_sample_softmax(nullptr, candidates);
  6638. const int64_t t_start_sample_us = ggml_time_us();
  6639. float entropy = 0.0f;
  6640. for (size_t i = 0; i < candidates->size; ++i) {
  6641. entropy += -candidates->data[i].p * logf(candidates->data[i].p);
  6642. }
  6643. // Compute the absolute difference between negative log probability and entropy for each candidate
  6644. std::vector<float> shifted_scores;
  6645. for (size_t i = 0; i < candidates->size; ++i) {
  6646. float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
  6647. shifted_scores.push_back(shifted_score);
  6648. }
  6649. // Sort tokens based on the shifted_scores and their corresponding indices
  6650. std::vector<size_t> indices(candidates->size);
  6651. std::iota(indices.begin(), indices.end(), 0);
  6652. std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
  6653. return shifted_scores[a] < shifted_scores[b];
  6654. });
  6655. // Compute the cumulative probabilities
  6656. float cum_sum = 0.0f;
  6657. size_t last_idx = indices.size();
  6658. for (size_t i = 0; i < indices.size(); ++i) {
  6659. size_t idx = indices[i];
  6660. cum_sum += candidates->data[idx].p;
  6661. // Check if the running sum is greater than typical or if we have kept at least min_keep tokens
  6662. if (cum_sum > p && i >= min_keep - 1) {
  6663. last_idx = i + 1;
  6664. break;
  6665. }
  6666. }
  6667. // Resize the output vector to keep only the locally typical tokens
  6668. std::vector<llama_token_data> new_candidates;
  6669. for (size_t i = 0; i < last_idx; ++i) {
  6670. size_t idx = indices[i];
  6671. new_candidates.push_back(candidates->data[idx]);
  6672. }
  6673. // Replace the data in candidates with the new_candidates data
  6674. std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
  6675. candidates->size = new_candidates.size();
  6676. candidates->sorted = false;
  6677. if (ctx) {
  6678. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6679. }
  6680. }
  6681. void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  6682. const int64_t t_start_sample_us = ggml_time_us();
  6683. for (size_t i = 0; i < candidates_p->size; ++i) {
  6684. candidates_p->data[i].logit /= temp;
  6685. }
  6686. if (ctx) {
  6687. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6688. }
  6689. }
  6690. void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  6691. llama_sample_temp(ctx, candidates_p, temp);
  6692. }
  6693. void llama_sample_repetition_penalties(
  6694. struct llama_context * ctx,
  6695. llama_token_data_array * candidates,
  6696. const llama_token * last_tokens,
  6697. size_t penalty_last_n,
  6698. float penalty_repeat,
  6699. float penalty_freq,
  6700. float penalty_present) {
  6701. if (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)) {
  6702. return;
  6703. }
  6704. const int64_t t_start_sample_us = ggml_time_us();
  6705. // Create a frequency map to count occurrences of each token in last_tokens
  6706. std::unordered_map<llama_token, int> token_count;
  6707. for (size_t i = 0; i < penalty_last_n; ++i) {
  6708. token_count[last_tokens[i]]++;
  6709. }
  6710. // Apply frequency and presence penalties to the candidates
  6711. for (size_t i = 0; i < candidates->size; ++i) {
  6712. const auto token_iter = token_count.find(candidates->data[i].id);
  6713. if (token_iter == token_count.end()) {
  6714. continue;
  6715. }
  6716. const int count = token_iter->second;
6717. // The academic publication that described this technique only divided by the penalty, but that would make tokens with negative logits more likely, which is clearly wrong.
6718. // The common fix, applied here, is to multiply negative logits by the penalty instead of dividing them.
  6719. if (candidates->data[i].logit <= 0) {
  6720. candidates->data[i].logit *= penalty_repeat;
  6721. } else {
  6722. candidates->data[i].logit /= penalty_repeat;
  6723. }
  6724. candidates->data[i].logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;
  6725. }
  6726. candidates->sorted = false;
  6727. if (ctx) {
  6728. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6729. }
  6730. }
  6731. void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
  6732. GGML_ASSERT(ctx);
  6733. const int64_t t_start_sample_us = ggml_time_us();
  6734. bool allow_eos = false;
  6735. for (const auto & stack : grammar->stacks) {
  6736. if (stack.empty()) {
  6737. allow_eos = true;
  6738. break;
  6739. }
  6740. }
  6741. const llama_token eos = llama_token_eos(&ctx->model);
  6742. std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
  6743. candidates_decoded.reserve(candidates->size);
  6744. std::vector<llama_grammar_candidate> candidates_grammar;
  6745. candidates_grammar.reserve(candidates->size);
  6746. for (size_t i = 0; i < candidates->size; ++i) {
  6747. const llama_token id = candidates->data[i].id;
  6748. const std::string piece = llama_token_to_piece(ctx, id);
  6749. if (id == eos) {
  6750. if (!allow_eos) {
  6751. candidates->data[i].logit = -INFINITY;
  6752. }
  6753. } else if (piece.empty() || piece[0] == 0) {
  6754. candidates->data[i].logit = -INFINITY;
  6755. } else {
  6756. candidates_decoded.push_back(decode_utf8(piece, grammar->partial_utf8));
  6757. candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
  6758. }
  6759. }
  6760. const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
  6761. for (const auto & reject : rejects) {
  6762. candidates->data[reject.index].logit = -INFINITY;
  6763. }
  6764. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6765. }
  6766. static void llama_log_softmax(float * array, size_t size) {
  6767. float max_l = *std::max_element(array, array + size);
  6768. float sum = 0.f;
  6769. for (size_t i = 0; i < size; ++i) {
  6770. float p = expf(array[i] - max_l);
  6771. sum += p;
  6772. array[i] = p;
  6773. }
  6774. for (size_t i = 0; i < size; ++i) {
  6775. array[i] = logf(array[i] / sum);
  6776. }
  6777. }
  6778. void llama_sample_classifier_free_guidance(
  6779. struct llama_context * ctx,
  6780. llama_token_data_array * candidates,
  6781. struct llama_context * guidance_ctx,
  6782. float scale) {
  6783. int64_t t_start_sample_us = ggml_time_us();
  6784. GGML_ASSERT(ctx);
  6785. auto n_vocab = llama_n_vocab(llama_get_model(ctx));
  6786. GGML_ASSERT(n_vocab == (int)candidates->size);
  6787. GGML_ASSERT(!candidates->sorted);
  6788. std::vector<float> logits_base;
  6789. logits_base.reserve(candidates->size);
  6790. for (size_t i = 0; i < candidates->size; ++i) {
  6791. logits_base.push_back(candidates->data[i].logit);
  6792. }
  6793. llama_log_softmax(logits_base.data(), candidates->size);
  6794. float* logits_guidance = llama_get_logits(guidance_ctx);
  6795. llama_log_softmax(logits_guidance, n_vocab);
  6796. for (int i = 0; i < n_vocab; ++i) {
  6797. float logit_guidance = logits_guidance[i];
  6798. float logit_base = logits_base[i];
  6799. candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance;
  6800. }
  6801. if (ctx) {
  6802. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6803. }
  6804. }
  6805. llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
  6806. GGML_ASSERT(ctx);
  6807. auto N = float(llama_n_vocab(llama_get_model(ctx)));
  6808. int64_t t_start_sample_us;
  6809. t_start_sample_us = ggml_time_us();
  6810. llama_sample_softmax(nullptr, candidates);
  6811. // Estimate s_hat using the most probable m tokens
  6812. float s_hat = 0.0;
  6813. float sum_ti_bi = 0.0;
  6814. float sum_ti_sq = 0.0;
  6815. for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
  6816. float t_i = logf(float(i + 2) / float(i + 1));
  6817. float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
  6818. sum_ti_bi += t_i * b_i;
  6819. sum_ti_sq += t_i * t_i;
  6820. }
  6821. s_hat = sum_ti_bi / sum_ti_sq;
  6822. // Compute k from the estimated s_hat and target surprise value
  6823. float epsilon_hat = s_hat - 1;
  6824. float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
  6825. // Sample the next word X using top-k sampling
  6826. llama_sample_top_k(nullptr, candidates, int(k), 1);
  6827. if (ctx) {
  6828. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6829. }
  6830. llama_token X = llama_sample_token(ctx, candidates);
  6831. t_start_sample_us = ggml_time_us();
  6832. // Compute error as the difference between observed surprise and target surprise value
  6833. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  6834. return candidate.id == X;
  6835. }));
  6836. float observed_surprise = -log2f(candidates->data[X_idx].p);
  6837. float e = observed_surprise - tau;
  6838. // Update mu using the learning rate and error
  6839. *mu = *mu - eta * e;
  6840. if (ctx) {
  6841. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6842. }
  6843. return X;
  6844. }
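// Informal summary (following the Mirostat paper this implements): s_hat is a least-squares
// estimate of the Zipf exponent from the top-m probabilities, k is chosen so that a top-k sample
// has expected surprise close to *mu, and after sampling the target is updated with
//
//   *mu -= eta * (observed_surprise - tau)
//
// so the running surprise of emitted tokens tracks tau.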
  6845. llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
  6846. int64_t t_start_sample_us;
  6847. t_start_sample_us = ggml_time_us();
  6848. llama_sample_softmax(ctx, candidates);
  6849. // Truncate the words with surprise values greater than mu
  6850. candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  6851. return -log2f(candidate.p) > *mu;
  6852. }));
  6853. if (candidates->size == 0) {
  6854. candidates->size = 1;
  6855. }
  6856. if (ctx) {
  6857. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6858. }
  6859. // Normalize the probabilities of the remaining words
  6860. llama_sample_softmax(ctx, candidates);
  6861. // Sample the next word X from the remaining words
  6862. llama_token X = llama_sample_token(ctx, candidates);
  6863. t_start_sample_us = ggml_time_us();
  6864. // Compute error as the difference between observed surprise and target surprise value
  6865. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  6866. return candidate.id == X;
  6867. }));
  6868. float observed_surprise = -log2f(candidates->data[X_idx].p);
  6869. float e = observed_surprise - tau;
  6870. // Update mu using the learning rate and error
  6871. *mu = *mu - eta * e;
  6872. if (ctx) {
  6873. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6874. }
  6875. return X;
  6876. }
  6877. llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
  6878. const int64_t t_start_sample_us = ggml_time_us();
  6879. // Find max element
  6880. auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  6881. return a.logit < b.logit;
  6882. });
  6883. llama_token result = max_iter->id;
  6884. if (ctx) {
  6885. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6886. ctx->n_sample++;
  6887. }
  6888. return result;
  6889. }
  6890. llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
  6891. GGML_ASSERT(ctx);
  6892. const int64_t t_start_sample_us = ggml_time_us();
  6893. llama_sample_softmax(nullptr, candidates);
  6894. std::vector<float> probs;
  6895. probs.reserve(candidates->size);
  6896. for (size_t i = 0; i < candidates->size; ++i) {
  6897. probs.push_back(candidates->data[i].p);
  6898. }
  6899. std::discrete_distribution<> dist(probs.begin(), probs.end());
  6900. auto & rng = ctx->rng;
  6901. int idx = dist(rng);
  6902. llama_token result = candidates->data[idx].id;
  6903. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6904. ctx->n_sample++;
  6905. return result;
  6906. }
  6907. void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
  6908. const int64_t t_start_sample_us = ggml_time_us();
  6909. if (token == llama_token_eos(&ctx->model)) {
  6910. for (const auto & stack : grammar->stacks) {
  6911. if (stack.empty()) {
  6912. return;
  6913. }
  6914. }
  6915. GGML_ASSERT(false);
  6916. }
  6917. const std::string piece = llama_token_to_piece(ctx, token);
6918. // Note: the decoded string includes a terminating 0, so the loop below stops one code point early to skip it
  6919. const auto decoded = decode_utf8(piece, grammar->partial_utf8);
  6920. const auto & code_points = decoded.first;
  6921. for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
  6922. grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
  6923. }
  6924. grammar->partial_utf8 = decoded.second;
  6925. GGML_ASSERT(!grammar->stacks.empty());
  6926. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6927. }
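// Typical call order for grammar-constrained sampling (sketch; assumes the llama_sample_grammar()
// helper declared in llama.h): constrain the candidates first, sample, then report the chosen
// token back so the parser stacks advance:
//
//     llama_sample_grammar(ctx, &candidates, grammar);
//     llama_token tok = llama_sample_token(ctx, &candidates);
//     llama_grammar_accept_token(ctx, grammar, tok);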
  6928. //
  6929. // Beam search
  6930. //
  6931. struct llama_beam {
  6932. std::vector<llama_token> tokens;
  6933. float p; // Cumulative beam probability (renormalized relative to all beams)
  6934. bool eob; // Initialize end-of-beam to false. Callback sets this to true.
  6935. // Sort beams by probability. In case of ties, prefer beams at eob.
  6936. bool operator<(const llama_beam & rhs) const {
  6937. return std::make_pair(p, eob) < std::make_pair(rhs.p, rhs.eob);
  6938. }
  6939. // Shift off first n tokens and discard them.
  6940. void shift_tokens(const size_t n) {
  6941. if (n) {
  6942. std::copy(tokens.begin() + n, tokens.end(), tokens.begin());
  6943. tokens.resize(tokens.size() - n);
  6944. }
  6945. }
  6946. llama_beam_view view() const { return {tokens.data(), tokens.size(), p, eob}; }
  6947. };
  6948. // A struct for calculating logit-related info.
  6949. struct llama_logit_info {
  6950. const float * const logits;
  6951. const int n_vocab;
  6952. const float max_l;
  6953. const float normalizer;
  6954. struct sum_exp {
  6955. float max_l;
  6956. float operator()(float sum, float l) const { return sum + std::exp(l - max_l); }
  6957. };
  6958. llama_logit_info(llama_context * ctx)
  6959. : logits(llama_get_logits(ctx))
  6960. , n_vocab(llama_n_vocab(llama_get_model(ctx)))
  6961. , max_l(*std::max_element(logits, logits + n_vocab))
  6962. , normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
  6963. { }
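// normalizer is the denominator of a numerically stable softmax: max_l is subtracted before
// exponentiating so std::exp() cannot overflow for large logits.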
  6964. llama_token_data get_token_data(const llama_token token_id) const {
  6965. constexpr auto p = std::numeric_limits<float>::quiet_NaN(); // never used
  6966. return {token_id, logits[token_id], p};
  6967. }
  6968. // Return top k token_data by logit.
  6969. std::vector<llama_token_data> top_k(size_t k) {
  6970. std::vector<llama_token_data> min_heap; // min-heap by logit
  6971. const llama_token k_min = std::min(static_cast<llama_token>(k), n_vocab);
  6972. min_heap.reserve(k_min);
  6973. for (llama_token token_id = 0 ; token_id < k_min ; ++token_id) {
  6974. min_heap.push_back(get_token_data(token_id));
  6975. }
  6976. auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; };
  6977. std::make_heap(min_heap.begin(), min_heap.end(), comp);
  6978. for (llama_token token_id = k_min ; token_id < n_vocab ; ++token_id) {
  6979. if (min_heap.front().logit < logits[token_id]) {
  6980. std::pop_heap(min_heap.begin(), min_heap.end(), comp);
  6981. min_heap.back().id = token_id;
  6982. min_heap.back().logit = logits[token_id];
  6983. std::push_heap(min_heap.begin(), min_heap.end(), comp);
  6984. }
  6985. }
  6986. return min_heap;
  6987. }
  6988. float probability_from_logit(float logit) const {
  6989. return normalizer * std::exp(logit - max_l);
  6990. }
  6991. };
  6992. struct llama_beam_search_data {
  6993. llama_context * ctx;
  6994. size_t n_beams;
  6995. int n_past;
  6996. int n_predict;
  6997. std::vector<llama_beam> beams;
  6998. std::vector<llama_beam> next_beams;
  6999. // Re-calculated on each loop iteration
  7000. size_t common_prefix_length;
7001. // Used to communicate the current beams' state to/from the callback.
  7002. std::vector<llama_beam_view> beam_views;
  7003. llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict)
  7004. : ctx(ctx)
  7005. , n_beams(n_beams)
  7006. , n_past(n_past)
  7007. , n_predict(n_predict)
  7008. , beam_views(n_beams) {
  7009. beams.reserve(n_beams);
  7010. next_beams.reserve(n_beams);
  7011. }
  7012. // Collapse beams to a single beam given by index.
  7013. void collapse_beams(const size_t beam_idx) {
  7014. if (0u < beam_idx) {
  7015. std::swap(beams[0], beams[beam_idx]);
  7016. }
  7017. beams.resize(1);
  7018. }
  7019. // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
  7020. // The repetitive patterns below reflect the 2 stages of heaps:
  7021. // * Gather elements until the vector is full, then call std::make_heap() on it.
  7022. // * If the heap is full and a new element is found that should be included, pop the
  7023. // least element to the back(), replace it with the new, then push it into the heap.
  7024. void fill_next_beams_by_top_probabilities(llama_beam & beam) {
  7025. // Min-heaps use a greater-than comparator.
  7026. const auto comp = [](const llama_beam & a, const llama_beam & b) { return a.p > b.p; };
  7027. if (beam.eob) {
  7028. // beam is at end-of-sentence, so just copy it to next_beams if its probability is high enough.
  7029. if (next_beams.size() < n_beams) {
  7030. next_beams.push_back(std::move(beam));
  7031. if (next_beams.size() == n_beams) {
  7032. std::make_heap(next_beams.begin(), next_beams.end(), comp);
  7033. }
  7034. } else if (next_beams.front().p < beam.p) {
  7035. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  7036. next_beams.back() = std::move(beam);
  7037. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  7038. }
  7039. } else {
  7040. // beam is not at end-of-sentence, so branch with next top_k tokens.
  7041. if (!beam.tokens.empty()) {
  7042. llama_decode(ctx, llama_batch_get_one(beam.tokens.data(), beam.tokens.size(), n_past, 0));
  7043. }
  7044. llama_logit_info logit_info(ctx);
  7045. std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);
  7046. size_t i=0;
  7047. if (next_beams.size() < n_beams) {
  7048. for (; next_beams.size() < n_beams ; ++i) {
  7049. llama_beam next_beam = beam;
  7050. next_beam.tokens.push_back(next_tokens[i].id);
  7051. next_beam.p *= logit_info.probability_from_logit(next_tokens[i].logit);
  7052. next_beams.push_back(std::move(next_beam));
  7053. }
  7054. std::make_heap(next_beams.begin(), next_beams.end(), comp);
  7055. } else {
  7056. for (; next_beams.front().p == 0.0f ; ++i) {
  7057. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  7058. next_beams.back() = beam;
  7059. next_beams.back().tokens.push_back(next_tokens[i].id);
  7060. next_beams.back().p *= logit_info.probability_from_logit(next_tokens[i].logit);
  7061. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  7062. }
  7063. }
  7064. for (; i < n_beams ; ++i) {
  7065. const float next_p = beam.p * logit_info.probability_from_logit(next_tokens[i].logit);
  7066. if (next_beams.front().p < next_p) {
  7067. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  7068. next_beams.back() = beam;
  7069. next_beams.back().tokens.push_back(next_tokens[i].id);
  7070. next_beams.back().p = next_p;
  7071. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  7072. }
  7073. }
  7074. }
  7075. }
  7076. // Find common_prefix_length based on beams.
  7077. // Requires beams is not empty.
  7078. size_t find_common_prefix_length() {
  7079. size_t common_prefix_length = beams[0].tokens.size();
  7080. for (size_t i = 1 ; i < beams.size() ; ++i) {
  7081. common_prefix_length = std::min(common_prefix_length, beams[i].tokens.size());
  7082. for (size_t j = 0 ; j < common_prefix_length ; ++j) {
  7083. if (beams[0].tokens[j] != beams[i].tokens[j]) {
  7084. common_prefix_length = j;
  7085. break;
  7086. }
  7087. }
  7088. }
  7089. return common_prefix_length;
  7090. }
  7091. // Construct beams_state to send back to caller via the callback function.
  7092. // Side effect: set common_prefix_length = find_common_prefix_length();
  7093. llama_beams_state get_beams_state(const bool last_call) {
  7094. for (size_t i = 0 ; i < beams.size() ; ++i) {
  7095. beam_views[i] = beams[i].view();
  7096. }
  7097. common_prefix_length = find_common_prefix_length();
  7098. return {beam_views.data(), beams.size(), common_prefix_length, last_call};
  7099. }
  7100. // Loop:
  7101. // * while i < n_predict, AND
  7102. // * any of the beams have not yet reached end-of-beam (eob), AND
  7103. // * the highest probability beam(s) (plural in case of ties) are not at end-of-sentence
  7104. // (since all other beam probabilities can only decrease)
  7105. void loop(const llama_beam_search_callback_fn_t callback, void * const callback_data) {
  7106. beams.push_back({{}, 1.0f, false}); // Start with one empty beam w/ probability = 1.0 and !eob.
  7107. const auto not_eob = [](const llama_beam & beam) { return !beam.eob; };
  7108. for (int i = 0 ; i < n_predict && std::any_of(beams.begin(),beams.end(),not_eob) &&
  7109. !beams[top_beam_index()].eob ; ++i) {
  7110. callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
  7111. update_beams_from_beam_views(); // Update values (p,eob) that callback may have changed.
  7112. if (common_prefix_length) {
  7113. llama_decode(ctx, llama_batch_get_one(beams[0].tokens.data(), common_prefix_length, n_past, 0));
  7114. n_past += common_prefix_length;
  7115. }
  7116. // Zero-out next_beam probabilities to place them last in following min-heap.
  7117. std::for_each(next_beams.begin(), next_beams.end(), [](llama_beam & beam) { beam.p = 0.0f; });
  7118. for (llama_beam & beam : beams) {
  7119. beam.shift_tokens(common_prefix_length);
  7120. fill_next_beams_by_top_probabilities(beam);
  7121. }
  7122. // next_beams become the beams of next/final iteration. Swap them to re-use memory.
  7123. beams.swap(next_beams);
  7124. renormalize_beam_probabilities(beams);
  7125. }
  7126. collapse_beams(top_beam_index());
  7127. callback(callback_data, get_beams_state(true));
  7128. }
  7129. // As beams grow, the cumulative probabilities decrease.
  7130. // Renormalize them to avoid floating point underflow.
  7131. static void renormalize_beam_probabilities(std::vector<llama_beam> & beams) {
  7132. const auto sum_p = [](float sum, llama_beam & beam) { return sum + beam.p; };
  7133. const float inv_sum = 1.0f / std::accumulate(beams.begin(), beams.end(), 0.0f, sum_p);
  7134. std::for_each(beams.begin(), beams.end(), [=](llama_beam & beam) { beam.p *= inv_sum; });
  7135. }
  7136. // Assumes beams is non-empty. Uses llama_beam::operator<() for ordering.
  7137. size_t top_beam_index() {
  7138. return std::max_element(beams.begin(), beams.end()) - beams.begin();
  7139. }
  7140. // Copy (p,eob) for each beam which may have been changed by the callback.
  7141. void update_beams_from_beam_views() {
  7142. for (size_t i = 0 ; i < beams.size() ; ++i) {
  7143. beams[i].p = beam_views[i].p;
  7144. beams[i].eob = beam_views[i].eob;
  7145. }
  7146. }
  7147. };
  7148. void llama_beam_search(llama_context * ctx,
  7149. llama_beam_search_callback_fn_t callback, void * callback_data,
  7150. size_t n_beams, int n_past, int n_predict) {
  7151. assert(ctx);
  7152. const int64_t t_start_sample_us = ggml_time_us();
  7153. llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict);
  7154. beam_search_data.loop(callback, callback_data);
  7155. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7156. ctx->n_sample++;
  7157. }
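// Illustrative usage sketch (not part of the original source): a minimal caller of
// llama_beam_search(). It relies on the llama_beams_state / llama_beam_view definitions from
// llama.h; the callback simply collects the tokens that all beams already agree on. The beam
// count, n_past and n_predict values are placeholders supplied by the caller's decode loop.
static void example_beam_search_callback(void * callback_data, llama_beams_state beams_state) {
    auto & response = *static_cast<std::vector<llama_token> *>(callback_data);
    // Tokens shared by every beam are final, so they can be appended to the response once.
    const size_t n = beams_state.common_prefix_length;
    if (n > 0) {
        const llama_beam_view & bv = beams_state.beam_views[0];
        response.insert(response.end(), bv.tokens, bv.tokens + n);
    }
}
// Example:
//     std::vector<llama_token> response;
//     llama_beam_search(ctx, example_beam_search_callback, &response, /*n_beams*/ 4, n_past, /*n_predict*/ 64);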
  7158. //
  7159. // quantization
  7160. //
  7161. struct quantize_state_internal {
  7162. const llama_model & model;
  7163. const llama_model_quantize_params * params;
  7164. int n_attention_wv = 0;
  7165. int n_feed_forward_w2 = 0;
  7166. int i_attention_wv = 0;
  7167. int i_feed_forward_w2 = 0;
  7168. int n_k_quantized = 0;
  7169. int n_fallback = 0;
  7170. quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
  7171. : model(model)
  7172. , params(params)
  7173. {}
  7174. };
  7175. static void llama_convert_tensor_internal(
  7176. struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
  7177. const size_t nelements, const int nthread
  7178. ) {
  7179. if (output.size() < nelements) {
  7180. output.resize(nelements);
  7181. }
  7182. float * f32_output = (float *) output.data();
  7183. ggml_type_traits_t qtype;
  7184. if (ggml_is_quantized(tensor->type)) {
  7185. qtype = ggml_internal_get_type_traits(tensor->type);
  7186. if (qtype.to_float == NULL) {
  7187. throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
  7188. }
  7189. } else if (tensor->type != GGML_TYPE_F16) {
  7190. throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
  7191. }
  7192. if (nthread < 2) {
  7193. if (tensor->type == GGML_TYPE_F16) {
  7194. ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
  7195. } else if (ggml_is_quantized(tensor->type)) {
  7196. qtype.to_float(tensor->data, f32_output, nelements);
  7197. } else {
  7198. GGML_ASSERT(false); // unreachable
  7199. }
  7200. return;
  7201. }
  7202. size_t block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
  7203. size_t block_size_bytes = ggml_type_size(tensor->type);
  7204. GGML_ASSERT(nelements % block_size == 0);
  7205. size_t nblocks = nelements / block_size;
  7206. size_t blocks_per_thread = nblocks / nthread;
  7207. size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
  7208. size_t in_buff_offs = 0;
  7209. size_t out_buff_offs = 0;
  7210. for (int tnum = 0; tnum < nthread; tnum++) {
  7211. size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
  7212. size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
  7213. size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
  7214. auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
  7215. if (typ == GGML_TYPE_F16) {
  7216. ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
  7217. } else {
  7218. qtype.to_float(inbuf, outbuf, nels);
  7219. }
  7220. };
  7221. workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
  7222. in_buff_offs += thr_block_bytes;
  7223. out_buff_offs += thr_elems;
  7224. }
  7225. for (auto & w : workers) { w.join(); }
  7226. workers.clear();
  7227. }
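// The conversion work is split by whole blocks: each thread dequantizes blocks_per_thread
// blocks, with the last thread also taking the spare blocks, so every element is converted
// exactly once and thread boundaries never fall inside a quantization block.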
  7228. static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
  7229. const std::string name = ggml_get_name(tensor);
  7230. // TODO: avoid hardcoded tensor names - use the TN_* constants
  7231. const llm_arch arch = qs.model.arch;
  7232. const auto tn = LLM_TN(arch);
  7233. auto use_more_bits = [](int i_layer, int num_layers) -> bool {
  7234. return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
  7235. };
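// use_more_bits() selects the first and last eighth of the layers, plus every third layer in
// between, as the layers that receive a higher-precision quantization type.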
  7236. if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
  7237. int nx = tensor->ne[0];
  7238. if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
  7239. new_type = GGML_TYPE_Q8_0;
  7240. }
  7241. else if (new_type != GGML_TYPE_Q8_0) {
  7242. new_type = GGML_TYPE_Q6_K;
  7243. }
  7244. } else if (name.find("attn_v.weight") != std::string::npos) {
  7245. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  7246. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  7247. new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  7248. }
  7249. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  7250. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  7251. use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
  7252. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
  7253. else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
  7254. (qs.i_attention_wv < qs.n_attention_wv/8 || qs.i_attention_wv >= 7*qs.n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
  7255. if (qs.model.type == MODEL_70B) {
  7256. // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
  7257. // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
  7258. // nearly negligible increase in model size by quantizing this tensor with more bits:
  7259. if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
  7260. }
  7261. if (qs.model.hparams.n_expert == 8) {
  7262. // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
  7263. // TODO: explore better strategies
  7264. new_type = GGML_TYPE_Q8_0;
  7265. }
  7266. ++qs.i_attention_wv;
  7267. } else if (name.find("attn_k.weight") != std::string::npos) {
  7268. if (qs.model.hparams.n_expert == 8) {
  7269. // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
  7270. // TODO: explore better strategies
  7271. new_type = GGML_TYPE_Q8_0;
  7272. }
  7273. } else if (name.find("ffn_down.weight") != std::string::npos) {
  7274. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  7275. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  7276. new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q5_K
  7277. : arch != LLM_ARCH_FALCON || use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q4_K
  7278. : GGML_TYPE_Q3_K;
  7279. }
  7280. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
  7281. new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
  7282. }
  7283. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
  7284. if (arch == LLM_ARCH_FALCON) {
  7285. new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q6_K :
  7286. use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  7287. } else {
  7288. if (use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
  7289. }
  7290. }
  7291. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
  7292. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && qs.i_feed_forward_w2 < 4) {
  7293. new_type = GGML_TYPE_Q5_K;
  7294. }
  7295. ++qs.i_feed_forward_w2;
  7296. } else if (name.find("attn_output.weight") != std::string::npos) {
  7297. if (arch != LLM_ARCH_FALCON) {
  7298. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
  7299. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) new_type = GGML_TYPE_Q4_K;
  7300. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  7301. } else {
  7302. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  7303. }
  7304. }
  7305. else if (name.find("attn_qkv.weight") != std::string::npos) {
  7306. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  7307. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
  7308. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
  7309. }
  7310. else if (name.find("ffn_gate.weight") != std::string::npos || name.find("ffn_up.weight") != std::string::npos) {
  7311. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  7312. }
  7313. // This can be used to reduce the size of the Q5_K_S model.
  7314. // The associated PPL increase is fully in line with the size reduction
  7315. //else {
  7316. // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
  7317. //}
  7318. bool convert_incompatible_tensor = false;
  7319. if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
  7320. new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
  7321. int nx = tensor->ne[0];
  7322. int ny = tensor->ne[1];
  7323. if (nx % QK_K != 0) {
7324. LLAMA_LOG_WARN("\n\n%s : tensor dims %d x %d: first dim not divisible by %d, as required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
  7325. convert_incompatible_tensor = true;
  7326. } else {
  7327. ++qs.n_k_quantized;
  7328. }
  7329. }
  7330. if (convert_incompatible_tensor) {
  7331. switch (new_type) {
  7332. case GGML_TYPE_Q2_K: new_type = GGML_TYPE_Q4_0; break;
  7333. case GGML_TYPE_Q3_K: new_type = GGML_TYPE_Q4_1; break;
  7334. case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
  7335. case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
  7336. case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
  7337. default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
  7338. }
  7339. LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
  7340. ++qs.n_fallback;
  7341. }
  7342. return new_type;
  7343. }
  7344. static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
  7345. ggml_type quantized_type;
  7346. llama_ftype ftype = params->ftype;
  7347. switch (params->ftype) {
  7348. case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
  7349. case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
  7350. case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
  7351. case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
  7352. case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
  7353. case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
  7354. case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
  7355. // K-quants
  7356. case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
  7357. case LLAMA_FTYPE_MOSTLY_Q3_K_S:
  7358. case LLAMA_FTYPE_MOSTLY_Q3_K_M:
  7359. case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
  7360. case LLAMA_FTYPE_MOSTLY_Q4_K_S:
  7361. case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
  7362. case LLAMA_FTYPE_MOSTLY_Q5_K_S:
  7363. case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
  7364. case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
  7365. default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
  7366. }
  7367. int nthread = params->nthread;
  7368. if (nthread <= 0) {
  7369. nthread = std::thread::hardware_concurrency();
  7370. }
7371. // mmap consistently increases speed on Linux, and also increases speed on Windows with
  7372. // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
  7373. #if defined(__linux__) || defined(_WIN32)
  7374. constexpr bool use_mmap = true;
  7375. #else
  7376. constexpr bool use_mmap = false;
  7377. #endif
  7378. llama_model_loader ml(fname_inp, use_mmap, NULL);
  7379. ml.init_mapping(false); // no prefetching?
  7380. llama_model model;
  7381. llm_load_arch(ml, model);
  7382. llm_load_hparams(ml, model);
  7383. struct quantize_state_internal qs(model, params);
  7384. if (params->only_copy) {
  7385. ftype = model.ftype;
  7386. }
  7387. const size_t align = GGUF_DEFAULT_ALIGNMENT;
  7388. struct gguf_context * ctx_out = gguf_init_empty();
  7389. // copy the KV pairs from the input file
  7390. gguf_set_kv (ctx_out, ml.ctx_gguf);
  7391. gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
  7392. gguf_set_val_u32(ctx_out, "general.file_type", ftype);
  7393. for (int i = 0; i < ml.n_tensors; ++i) {
  7394. struct ggml_tensor * meta = ml.get_tensor_meta(i);
  7395. const std::string name = ggml_get_name(meta);
  7396. // TODO: avoid hardcoded tensor names - use the TN_* constants
  7397. if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
  7398. ++qs.n_attention_wv;
  7399. }
  7400. else if (name.find("ffn_down.weight") != std::string::npos) {
  7401. ++qs.n_feed_forward_w2;
  7402. }
  7403. }
  7404. if (qs.n_attention_wv != qs.n_feed_forward_w2 || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) {
  7405. LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_feed_forward_w2 = %d, hparams.n_layer = %d\n",
  7406. __func__, qs.n_attention_wv, qs.n_feed_forward_w2, model.hparams.n_layer);
  7407. }
  7408. size_t total_size_org = 0;
  7409. size_t total_size_new = 0;
  7410. std::vector<int64_t> hist_all(1 << 4, 0);
  7411. std::vector<std::thread> workers;
  7412. workers.reserve(nthread);
  7413. std::mutex mutex;
  7414. int idx = 0;
  7415. std::vector<no_init<uint8_t>> read_data;
  7416. std::vector<no_init<uint8_t>> work;
  7417. std::vector<no_init<float>> f32_conv_buf;
7418. // populate the original tensors so we get the initial metadata
  7419. for (int i = 0; i < ml.n_tensors; ++i) {
  7420. struct ggml_tensor * meta = ml.get_tensor_meta(i);
  7421. gguf_add_tensor(ctx_out, meta);
  7422. }
  7423. std::ofstream fout(fname_out, std::ios::binary);
  7424. fout.exceptions(std::ofstream::failbit); // fail fast on write errors
  7425. const size_t meta_size = gguf_get_meta_size(ctx_out);
  7426. LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size);
  7427. // placeholder for the meta data
  7428. ::zeros(fout, meta_size);
  7429. for (int i = 0; i < ml.n_tensors; ++i) {
  7430. struct ggml_tensor * tensor = ml.get_tensor_meta(i);
  7431. const std::string name = ggml_get_name(tensor);
  7432. if (!ml.use_mmap) {
  7433. if (read_data.size() < ggml_nbytes(tensor)) {
  7434. read_data.resize(ggml_nbytes(tensor));
  7435. }
  7436. tensor->data = read_data.data();
  7437. }
  7438. ml.load_data_for(tensor);
  7439. LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
  7440. ++idx, ml.n_tensors,
  7441. ggml_get_name(tensor),
  7442. llama_format_tensor_shape(tensor).c_str(),
  7443. ggml_type_name(tensor->type));
  7444. // This used to be a regex, but <regex> has an extreme cost to compile times.
  7445. bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
  7446. // quantize only 2D tensors
  7447. quantize &= (ggml_n_dims(tensor) == 2);
  7448. quantize &= params->quantize_output_tensor || name != "output.weight";
  7449. quantize &= !params->only_copy;
  7450. // do not quantize expert gating tensors
  7451. quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
  7452. enum ggml_type new_type;
  7453. void * new_data;
  7454. size_t new_size;
  7455. if (quantize) {
  7456. new_type = quantized_type;
  7457. if (!params->pure) {
  7458. new_type = get_k_quant_type(qs, new_type, tensor, ftype);
  7459. }
  7460. // If we've decided to quantize to the same type the tensor is already
  7461. // in then there's nothing to do.
  7462. quantize = tensor->type != new_type;
  7463. }
  7464. if (!quantize) {
  7465. new_type = tensor->type;
  7466. new_data = tensor->data;
  7467. new_size = ggml_nbytes(tensor);
  7468. LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
  7469. } else {
  7470. const size_t nelements = ggml_nelements(tensor);
  7471. float * f32_data;
  7472. if (tensor->type == GGML_TYPE_F32) {
  7473. f32_data = (float *) tensor->data;
  7474. } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
  7475. throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
  7476. } else {
  7477. llama_convert_tensor_internal(tensor, f32_conv_buf, workers, nelements, nthread);
  7478. f32_data = (float *) f32_conv_buf.data();
  7479. }
  7480. LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
  7481. fflush(stdout);
  7482. if (work.size() < nelements * 4) {
  7483. work.resize(nelements * 4); // upper bound on size
  7484. }
  7485. new_data = work.data();
  7486. std::array<int64_t, 1 << 4> hist_cur = {};
  7487. static const int chunk_size = 32 * 512;
  7488. const int nchunk = (nelements + chunk_size - 1)/chunk_size;
  7489. const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
  7490. if (nthread_use < 2) {
  7491. new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
  7492. } else {
  7493. size_t counter = 0;
  7494. new_size = 0;
  7495. auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() {
  7496. std::array<int64_t, 1 << 4> local_hist = {};
  7497. size_t local_size = 0;
  7498. while (true) {
  7499. std::unique_lock<std::mutex> lock(mutex);
  7500. size_t first = counter; counter += chunk_size;
  7501. if (first >= nelements) {
  7502. if (local_size > 0) {
  7503. for (int j=0; j<int(local_hist.size()); ++j) {
  7504. hist_cur[j] += local_hist[j];
  7505. }
  7506. new_size += local_size;
  7507. }
  7508. break;
  7509. }
  7510. lock.unlock();
  7511. size_t last = std::min(nelements, first + chunk_size);
  7512. local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
  7513. }
  7514. };
  7515. for (int it = 0; it < nthread_use - 1; ++it) {
  7516. workers.emplace_back(compute);
  7517. }
  7518. compute();
  7519. for (auto & w : workers) { w.join(); }
  7520. workers.clear();
  7521. }
  7522. LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
  7523. int64_t tot_count = 0;
  7524. for (size_t i = 0; i < hist_cur.size(); i++) {
  7525. hist_all[i] += hist_cur[i];
  7526. tot_count += hist_cur[i];
  7527. }
  7528. if (tot_count > 0) {
  7529. for (size_t i = 0; i < hist_cur.size(); i++) {
  7530. LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
  7531. }
  7532. }
  7533. LLAMA_LOG_INFO("\n");
  7534. }
  7535. total_size_org += ggml_nbytes(tensor);
  7536. total_size_new += new_size;
  7537. // update the gguf meta data as we go
  7538. gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
  7539. gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
  7540. // write tensor data + padding
  7541. fout.write((const char *) new_data, new_size);
  7542. zeros(fout, GGML_PAD(new_size, align) - new_size);
  7543. }
  7544. // go back to beginning of file and write the updated meta data
  7545. {
  7546. fout.seekp(0);
  7547. std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
  7548. gguf_get_meta_data(ctx_out, data.data());
  7549. fout.write((const char *) data.data(), data.size());
  7550. }
  7551. fout.close();
  7552. gguf_free(ctx_out);
  7553. LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
  7554. LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
  7555. // print histogram for all tensors
  7556. {
  7557. int64_t sum_all = 0;
  7558. for (size_t i = 0; i < hist_all.size(); i++) {
  7559. sum_all += hist_all[i];
  7560. }
  7561. if (sum_all > 0) {
  7562. LLAMA_LOG_INFO("%s: hist: ", __func__);
  7563. for (size_t i = 0; i < hist_all.size(); i++) {
  7564. LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
  7565. }
  7566. LLAMA_LOG_INFO("\n");
  7567. }
  7568. }
  7569. if (qs.n_fallback > 0) {
  7570. LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n",
  7571. __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
  7572. }
  7573. }
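// Illustrative usage sketch (not part of the original source): quantizing a model through the
// public wrapper llama_model_quantize() defined further below. File names are placeholders.
//
//     llama_model_quantize_params qparams = llama_model_quantize_default_params();
//     qparams.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M;
//     qparams.nthread = 8; // <= 0 falls back to std::thread::hardware_concurrency()
//     if (llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams) != 0) {
//         fprintf(stderr, "quantization failed\n");
//     }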
  7574. static int llama_apply_lora_from_file_internal(
  7575. const struct llama_model & model, const char * path_lora, float scale, const char * path_base_model, int n_threads
  7576. ) {
  7577. LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
  7578. const int64_t t_start_lora_us = ggml_time_us();
  7579. llama_file fin(path_lora, "rb");
  7580. // verify magic and version
  7581. {
  7582. uint32_t magic = fin.read_u32();
  7583. if (magic != LLAMA_FILE_MAGIC_GGLA) {
  7584. LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
  7585. return 1;
  7586. }
  7587. uint32_t format_version = fin.read_u32();
  7588. if (format_version != 1) {
  7589. LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
  7590. return 1;
  7591. }
  7592. }
  7593. int32_t lora_r = fin.read_u32();
  7594. int32_t lora_alpha = fin.read_u32();
  7595. float scaling = scale * (float)lora_alpha / (float)lora_r;
  7596. LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
  7597. // create a name -> tensor map of the model to accelerate lookups
  7598. // find the max tensor size to estimate the required temporary buffer size
  7599. size_t max_tensor_size = 0;
  7600. std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
  7601. for (const auto & kv : model.tensors_by_name) {
  7602. model_tensors.insert(kv);
  7603. size_t f32_size = ggml_nelements(kv.second) * sizeof(float);
  7604. max_tensor_size = std::max(max_tensor_size, f32_size);
  7605. }
  7606. // create a temporary ggml context to store the lora tensors
  7607. // TODO: use ggml-alloc
  7608. size_t lora_ctx_size = max_tensor_size * 3;
  7609. LLAMA_LOG_INFO("%s: allocating %.f MB for lora temporary buffer\n", __func__, lora_ctx_size / 1024.0 / 1024.0);
  7610. std::vector<uint8_t> lora_buf(lora_ctx_size);
  7611. struct ggml_init_params params;
  7612. params.mem_size = lora_buf.size();
  7613. params.mem_buffer = lora_buf.data();
  7614. params.no_alloc = false;
  7615. using unique_context = std::unique_ptr<ggml_context, decltype(&ggml_free)>;
  7616. unique_context lora_ctx(nullptr, ggml_free);
  7617. lora_ctx.reset(ggml_init(params));
  7618. std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
  7619. // load base model
  7620. std::unique_ptr<llama_model_loader> ml;
  7621. if (path_base_model) {
  7622. LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
  7623. ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr));
  7624. ml->init_mapping(false); // no prefetching
  7625. }
  7626. // read tensors and apply
  7627. bool warned = false;
  7628. int n_tensors = 0;
  7629. std::vector<uint8_t> work_buffer;
  7630. while (true) {
  7631. if (fin.tell() == fin.size) {
  7632. // eof
  7633. break;
  7634. }
  7635. int32_t n_dims;
  7636. int32_t name_len;
  7637. int32_t ftype;
  7638. fin.read_raw(&n_dims, sizeof(n_dims));
  7639. fin.read_raw(&name_len, sizeof(name_len));
  7640. fin.read_raw(&ftype, sizeof(ftype));
  7641. if (n_dims != 1 && n_dims != 2) {
  7642. LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
  7643. return 1;
  7644. }
  7645. int32_t ne[2] = { 1, 1 };
  7646. for (int i = 0; i < n_dims; ++i) {
  7647. fin.read_raw(&ne[i], sizeof(ne[i]));
  7648. }
  7649. std::string name;
  7650. {
  7651. GGML_ASSERT(name_len <= 1024);
  7652. char buf[1024];
  7653. fin.read_raw(buf, name_len);
  7654. name = std::string(buf, name_len);
  7655. }
  7656. // check for lora suffix and get the type of tensor
  7657. const std::string lora_suffix = ".lora";
  7658. size_t pos = name.rfind(lora_suffix);
  7659. if (pos == std::string::npos) {
  7660. LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
  7661. return 1;
  7662. }
  7663. std::string lora_type = name.substr(pos + lora_suffix.length());
  7664. std::string base_name = name;
  7665. base_name.erase(pos);
  7666. // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(), base_name.c_str(), lora_type.c_str());
  7667. if (model_tensors.find(base_name) == model_tensors.end()) {
  7668. LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
  7669. return 1;
  7670. }
  7671. // create ggml tensor
  7672. ggml_type wtype;
  7673. switch (ftype) {
  7674. case 0: wtype = GGML_TYPE_F32; break;
  7675. case 1: wtype = GGML_TYPE_F16; break;
  7676. default:
  7677. {
  7678. LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
  7679. __func__, ftype);
7680. return 1; // non-zero indicates failure
  7681. }
  7682. }
  7683. ggml_tensor * lora_tensor = ggml_new_tensor_2d(lora_ctx.get(), wtype, ne[0], ne[1]);
  7684. ggml_set_name(lora_tensor, name.c_str());
  7685. // load tensor data
  7686. size_t offset = fin.tell();
  7687. size_t tensor_data_size = ggml_nbytes(lora_tensor);
7688. offset = (offset + 31) & -32; // align the tensor data offset to 32 bytes
  7689. fin.seek(offset, SEEK_SET);
  7690. fin.read_raw(lora_tensor->data, tensor_data_size);
  7691. lora_tensors[name] = lora_tensor;
  7692. // check if we have both A and B tensors and apply
  7693. if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
  7694. lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
  7695. ggml_tensor * dest_t = model_tensors[base_name];
  7696. offload_func_t offload_func = ggml_offload_nop;
  7697. offload_func_t offload_func_force_inplace = ggml_offload_nop;
  7698. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  7699. if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
  7700. if (dest_t->type != GGML_TYPE_F16) {
  7701. throw std::runtime_error(format(
  7702. "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models. dest_t->type: %d", __func__, dest_t->type));
  7703. }
  7704. offload_func = ggml_cuda_assign_buffers;
  7705. offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
  7706. }
  7707. #endif // GGML_USE_CUBLAS
  7708. ggml_tensor * base_t;
  7709. if (ml) {
  7710. struct gguf_context * ctx_gguf = ml->ctx_gguf;
  7711. // load from base model
  7712. if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) {
  7713. LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
  7714. return 1;
  7715. }
  7716. base_t = ml->get_tensor_meta(base_name.c_str());
  7717. ml->load_data_for(base_t);
  7718. } else {
  7719. base_t = dest_t;
  7720. }
  7721. if (ggml_is_quantized(base_t->type)) {
  7722. if (!warned) {
  7723. LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
  7724. "use a f16 or f32 base model with --lora-base\n", __func__);
  7725. warned = true;
  7726. }
  7727. }
  7728. ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
  7729. GGML_ASSERT(loraA->type == GGML_TYPE_F32);
  7730. ggml_set_name(loraA, "loraA");
  7731. ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
  7732. GGML_ASSERT(loraB->type == GGML_TYPE_F32);
  7733. ggml_set_name(loraB, "loraB");
  7734. if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
  7735. LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
  7736. " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
  7737. return 1;
  7738. }
  7739. // w = w + BA*s
  7740. ggml_tensor * BA = ggml_mul_mat(lora_ctx.get(), loraA, loraB);
  7741. offload_func(BA);
  7742. ggml_set_name(BA, "BA");
  7743. if (scaling != 1.0f) {
  7744. BA = ggml_scale_inplace(lora_ctx.get(), BA, scaling);
  7745. offload_func(BA);
  7746. ggml_set_name(BA, "BA_scaled");
  7747. }
  7748. ggml_tensor * r;
  7749. if (base_t == dest_t) {
  7750. r = ggml_add_inplace(lora_ctx.get(), dest_t, BA);
  7751. offload_func_force_inplace(r);
  7752. ggml_set_name(r, "r_add_inplace");
  7753. }
  7754. else {
  7755. r = ggml_add(lora_ctx.get(), base_t, BA);
  7756. offload_func(r);
  7757. ggml_set_name(r, "r_add");
  7758. r = ggml_cpy(lora_ctx.get(), r, dest_t);
  7759. offload_func(r);
  7760. ggml_set_name(r, "r_cpy");
  7761. }
  7762. struct ggml_cgraph * gf = ggml_new_graph(lora_ctx.get());
  7763. ggml_build_forward_expand(gf, r);
  7764. ggml_graph_compute_helper(work_buffer, gf, n_threads);
  7765. // the tensors in the adapter must be sorted such that loraA and loraB of the same tensor are next to each other
  7766. GGML_ASSERT(lora_tensors.size() == 2);
  7767. // we won't need these tensors again, reset the context to save memory
  7768. lora_ctx.reset(ggml_init(params));
  7769. lora_tensors.clear();
  7770. n_tensors++;
  7771. if (n_tensors % 4 == 0) {
  7772. LLAMA_LOG_INFO(".");
  7773. }
  7774. }
  7775. }
  7776. const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
  7777. LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
  7778. return 0;
  7779. }
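// Illustrative usage sketch (not part of the original source): applying an adapter through the
// public wrapper llama_model_apply_lora_from_file() defined further below. Paths are
// placeholders; passing a non-NULL path_base_model selects an f16/f32 base model to compute the
// LoRA deltas against instead of the (possibly quantized) loaded weights.
//
//     if (llama_model_apply_lora_from_file(model, "adapter.ggla", /*scale*/ 1.0f,
//                                          "base-model-f16.gguf", /*n_threads*/ 8) != 0) {
//         fprintf(stderr, "failed to apply lora adapter\n");
//     }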
  7780. //
  7781. // interface implementation
  7782. //
  7783. struct llama_model_params llama_model_default_params() {
  7784. struct llama_model_params result = {
  7785. /*.n_gpu_layers =*/ 0,
  7786. /*.main_gpu =*/ 0,
  7787. /*.tensor_split =*/ nullptr,
  7788. /*.progress_callback =*/ nullptr,
  7789. /*.progress_callback_user_data =*/ nullptr,
  7790. /*.kv_overrides =*/ nullptr,
  7791. /*.vocab_only =*/ false,
  7792. /*.use_mmap =*/ true,
  7793. /*.use_mlock =*/ false,
  7794. };
  7795. #ifdef GGML_USE_METAL
  7796. result.n_gpu_layers = 1;
  7797. #endif
  7798. return result;
  7799. }
  7800. struct llama_context_params llama_context_default_params() {
  7801. struct llama_context_params result = {
  7802. /*.seed =*/ LLAMA_DEFAULT_SEED,
  7803. /*.n_ctx =*/ 512,
  7804. /*.n_batch =*/ 512,
  7805. /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
  7806. /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
  7807. /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_UNSPECIFIED,
  7808. /*.rope_freq_base =*/ 0.0f,
  7809. /*.rope_freq_scale =*/ 0.0f,
  7810. /*.yarn_ext_factor =*/ -1.0f,
  7811. /*.yarn_attn_factor =*/ 1.0f,
  7812. /*.yarn_beta_fast =*/ 32.0f,
  7813. /*.yarn_beta_slow =*/ 1.0f,
  7814. /*.yarn_orig_ctx =*/ 0,
  7815. /*.type_k =*/ GGML_TYPE_F16,
  7816. /*.type_v =*/ GGML_TYPE_F16,
  7817. /*.mul_mat_q =*/ true,
  7818. /*.logits_all =*/ false,
  7819. /*.embedding =*/ false,
  7820. /*.offload_kqv =*/ true,
  7821. };
  7822. return result;
  7823. }
  7824. struct llama_model_quantize_params llama_model_quantize_default_params() {
  7825. struct llama_model_quantize_params result = {
  7826. /*.nthread =*/ 0,
  7827. /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
  7828. /*.allow_requantize =*/ false,
  7829. /*.quantize_output_tensor =*/ true,
  7830. /*.only_copy =*/ false,
  7831. /*.pure =*/ false,
  7832. };
  7833. return result;
  7834. }
  7835. int llama_max_devices(void) {
  7836. return LLAMA_MAX_DEVICES;
  7837. }
  7838. bool llama_mmap_supported(void) {
  7839. return llama_mmap::SUPPORTED;
  7840. }
  7841. bool llama_mlock_supported(void) {
  7842. return llama_mlock::SUPPORTED;
  7843. }
  7844. void llama_backend_init(bool numa) {
  7845. ggml_time_init();
  7846. // needed to initialize f16 tables
  7847. {
  7848. struct ggml_init_params params = { 0, NULL, false };
  7849. struct ggml_context * ctx = ggml_init(params);
  7850. ggml_free(ctx);
  7851. }
  7852. if (numa) {
  7853. ggml_numa_init();
  7854. }
  7855. #ifdef GGML_USE_MPI
  7856. ggml_mpi_backend_init();
  7857. #endif
  7858. }
  7859. void llama_backend_free(void) {
  7860. #ifdef GGML_USE_MPI
  7861. ggml_mpi_backend_free();
  7862. #endif
  7863. }
  7864. int64_t llama_time_us(void) {
  7865. return ggml_time_us();
  7866. }
  7867. struct llama_model * llama_load_model_from_file(
  7868. const char * path_model,
  7869. struct llama_model_params params) {
  7870. ggml_time_init();
  7871. llama_model * model = new llama_model;
  7872. unsigned cur_percentage = 0;
  7873. if (params.progress_callback == NULL) {
  7874. params.progress_callback_user_data = &cur_percentage;
  7875. params.progress_callback = [](float progress, void * ctx) {
  7876. unsigned * cur_percentage_p = (unsigned *) ctx;
  7877. unsigned percentage = (unsigned) (100 * progress);
  7878. while (percentage > *cur_percentage_p) {
  7879. *cur_percentage_p = percentage;
  7880. LLAMA_LOG_INFO(".");
  7881. if (percentage >= 100) {
  7882. LLAMA_LOG_INFO("\n");
  7883. }
  7884. }
  7885. return true;
  7886. };
  7887. }
  7888. int status = llama_model_load(path_model, *model, params);
  7889. GGML_ASSERT(status <= 0);
  7890. if (status < 0) {
  7891. if (status == -1) {
  7892. LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
  7893. } else if (status == -2) {
  7894. LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
  7895. }
  7896. delete model;
  7897. return nullptr;
  7898. }
  7899. return model;
  7900. }
  7901. void llama_free_model(struct llama_model * model) {
  7902. delete model;
  7903. }
  7904. struct llama_context * llama_new_context_with_model(
  7905. struct llama_model * model,
  7906. struct llama_context_params params) {
  7907. if (!model) {
  7908. return nullptr;
  7909. }
  7910. llama_context * ctx = new llama_context(*model);
  7911. const auto & hparams = model->hparams;
  7912. auto & cparams = ctx->cparams;
  7913. cparams.n_batch = params.n_batch;
  7914. cparams.n_threads = params.n_threads;
  7915. cparams.n_threads_batch = params.n_threads_batch;
  7916. cparams.yarn_ext_factor = params.yarn_ext_factor;
  7917. cparams.yarn_attn_factor = params.yarn_attn_factor;
  7918. cparams.yarn_beta_fast = params.yarn_beta_fast;
  7919. cparams.yarn_beta_slow = params.yarn_beta_slow;
  7920. cparams.mul_mat_q = params.mul_mat_q;
  7921. cparams.offload_kqv = params.offload_kqv;
  7922. cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
  7923. cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
  7924. cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
  7925. cparams.n_yarn_orig_ctx = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
  7926. hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
  7927. hparams.n_ctx_train;
  7928. auto rope_scaling_type = params.rope_scaling_type;
  7929. if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) {
  7930. rope_scaling_type = hparams.rope_scaling_type_train;
  7931. }
  7932. if (rope_scaling_type == LLAMA_ROPE_SCALING_NONE) {
  7933. cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
  7934. }
  7935. if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
  7936. cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 1.0f : 0.0f;
  7937. }
  7938. if (params.seed == LLAMA_DEFAULT_SEED) {
  7939. params.seed = time(NULL);
  7940. }
  7941. LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
  7942. LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
  7943. LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
  7944. ctx->rng = std::mt19937(params.seed);
  7945. ctx->logits_all = params.logits_all;
  7946. const ggml_type type_k = params.type_k;
  7947. const ggml_type type_v = params.type_v;
  7948. GGML_ASSERT(hparams.n_embd_head() % ggml_blck_size(type_k) == 0);
  7949. GGML_ASSERT(hparams.n_embd_head() % ggml_blck_size(type_v) == 0);
  7950. // reserve memory for context buffers
  7951. if (!hparams.vocab_only) {
  7952. // initialize backend
  7953. #ifdef GGML_USE_METAL
  7954. if (model->n_gpu_layers > 0) {
  7955. ctx->backend = ggml_backend_metal_init();
  7956. if (ctx->backend == nullptr) {
  7957. LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
  7958. }
  7959. }
  7960. #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  7961. // for testing only
  7962. if (model->n_gpu_layers > 0) {
  7963. ctx->backend = ggml_backend_cuda_init(0);
  7964. if (ctx->backend == nullptr) {
  7965. LLAMA_LOG_ERROR("%s: failed to initialize CUDA backend\n", __func__);
  7966. }
  7967. }
  7968. #endif
  7969. if (ctx->backend == nullptr && ggml_backend_buffer_is_host(model->buf)) {
  7970. ctx->backend = ggml_backend_cpu_init();
  7971. if (ctx->backend == nullptr) {
  7972. LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__);
  7973. }
  7974. }
  7975. if (ctx->backend == nullptr) {
  7976. LLAMA_LOG_ERROR("%s: failed to initialize a backend\n", __func__);
  7977. delete ctx;
  7978. return nullptr;
  7979. }
  7980. if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, type_k, type_v,
  7981. cparams.n_ctx, model->n_gpu_layers, cparams.offload_kqv)) {
  7982. LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
  7983. llama_free(ctx);
  7984. return nullptr;
  7985. }
  7986. {
  7987. size_t memory_size_k = 0;
  7988. size_t memory_size_v = 0;
  7989. for (auto & k : ctx->kv_self.k_l) {
  7990. memory_size_k += ggml_nbytes(k);
  7991. }
  7992. for (auto & v : ctx->kv_self.v_l) {
  7993. memory_size_v += ggml_nbytes(v);
  7994. }
  7995. LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
  7996. (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
  7997. ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
  7998. ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
  7999. }
  8000. // resized during inference
  8001. if (params.logits_all) {
  8002. ctx->logits.reserve(cparams.n_ctx*hparams.n_vocab);
  8003. } else {
  8004. ctx->logits.reserve(hparams.n_vocab);
  8005. }
  8006. if (params.embedding){
  8007. ctx->embedding.resize(hparams.n_embd);
  8008. }
  8009. {
  8010. // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
  8011. ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead());
  8012. // create measure allocator
  8013. ctx->alloc = ggml_allocr_new_measure_from_backend(ctx->backend);
  8014. // build worst-case graph
  8015. int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch);
  8016. int n_past = cparams.n_ctx - n_tokens;
  8017. llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
  8018. ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0));
  8019. // measure memory requirements for the graph
  8020. size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf);
  8021. LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MiB\n", __func__, (ctx->buf_compute_meta.size() + alloc_size) / 1024.0 / 1024.0);
  8022. // create allocator again with exact memory requirements
  8023. ggml_allocr_free(ctx->alloc);
  8024. ctx->buf_alloc = ggml_backend_alloc_buffer(ctx->backend, alloc_size);
  8025. ctx->alloc = ggml_allocr_new_from_buffer(ctx->buf_alloc);
  8026. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  8027. if (model->n_gpu_layers > 0) {
  8028. // the CPU buffer adds this padding in case the malloc buffer is not aligned, so we need to do the same for the GPU buffer, since we use the same offsets
  8029. ggml_cuda_set_scratch_size(alloc_size + 64);
  8030. LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MiB\n", __func__, alloc_size / 1024.0 / 1024.0);
  8031. // calculate total VRAM usage
  8032. auto add_tensor = [](const ggml_tensor * t, size_t & size) {
  8033. if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) {
  8034. size += ggml_nbytes(t);
  8035. }
  8036. };
  8037. size_t model_vram_size = 0;
  8038. for (const auto & kv : model->tensors_by_name) {
  8039. add_tensor(kv.second, model_vram_size);
  8040. }
  8041. size_t kv_vram_size = 0;
  8042. for (auto & k : ctx->kv_self.k_l) {
  8043. add_tensor(k, kv_vram_size);
  8044. }
  8045. for (auto & v : ctx->kv_self.v_l) {
  8046. add_tensor(v, kv_vram_size);
  8047. }
  8048. size_t ctx_vram_size = alloc_size + kv_vram_size;
  8049. size_t total_vram_size = model_vram_size + ctx_vram_size;
  8050. LLAMA_LOG_INFO("%s: total VRAM used: %.2f MiB (model: %.2f MiB, context: %.2f MiB)\n", __func__,
  8051. total_vram_size / 1024.0 / 1024.0,
  8052. model_vram_size / 1024.0 / 1024.0,
  8053. ctx_vram_size / 1024.0 / 1024.0);
  8054. }
  8055. #endif
  8056. }
  8057. }
  8058. #ifdef GGML_USE_MPI
  8059. ctx->ctx_mpi = ggml_mpi_init();
  8060. if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
  8061. // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
  8062. // TODO: needs fix after #3228
  8063. GGML_ASSERT(false && "not implemented");
  8064. //const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
  8065. //while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
  8066. llama_backend_free();
  8067. exit(1);
  8068. }
  8069. #endif
  8070. return ctx;
  8071. }
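// Illustrative usage sketch (not part of the original source): the usual call sequence for
// creating a context. The model path and n_ctx value are placeholders; the caller eventually
// frees with llama_free(ctx), llama_free_model(model) and llama_backend_free().
static struct llama_context * example_create_context(const char * path_model) {
    llama_backend_init(/*numa*/ false);
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file(path_model, mparams);
    if (model == nullptr) {
        return nullptr;
    }
    struct llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx = 2048; // 0 would fall back to the model's training context length
    struct llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == nullptr) {
        llama_free_model(model);
    }
    return ctx;
}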
  8072. void llama_free(struct llama_context * ctx) {
  8073. delete ctx;
  8074. }
  8075. const llama_model * llama_get_model(const struct llama_context * ctx) {
  8076. return &ctx->model;
  8077. }
  8078. uint32_t llama_n_ctx(const struct llama_context * ctx) {
  8079. return ctx->cparams.n_ctx;
  8080. }
  8081. uint32_t llama_n_batch(const struct llama_context * ctx) {
  8082. return ctx->cparams.n_batch;
  8083. }
  8084. enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
  8085. return model->vocab.type;
  8086. }
  8087. int llama_n_vocab(const struct llama_model * model) {
  8088. return model->vocab.id_to_token.size();
  8089. }
  8090. int llama_n_ctx_train(const struct llama_model * model) {
  8091. return model->hparams.n_ctx_train;
  8092. }
  8093. int llama_n_embd(const struct llama_model * model) {
  8094. return model->hparams.n_embd;
  8095. }
  8096. float llama_rope_freq_scale_train(const struct llama_model * model) {
  8097. return model->hparams.rope_freq_scale_train;
  8098. }
  8099. int llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
  8100. const auto & it = model->gguf_kv.find(key);
  8101. if (it == model->gguf_kv.end()) {
  8102. if (buf_size > 0) {
  8103. buf[0] = '\0';
  8104. }
  8105. return -1;
  8106. }
  8107. return snprintf(buf, buf_size, "%s", it->second.c_str());
  8108. }
  8109. int llama_model_meta_count(const struct llama_model * model) {
  8110. return (int)model->gguf_kv.size();
  8111. }
  8112. int llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
  8113. if (i < 0 || i >= (int)model->gguf_kv.size()) {
  8114. if (buf_size > 0) {
  8115. buf[0] = '\0';
  8116. }
  8117. return -1;
  8118. }
  8119. auto it = model->gguf_kv.begin();
  8120. std::advance(it, i);
  8121. return snprintf(buf, buf_size, "%s", it->first.c_str());
  8122. }
  8123. int llama_model_meta_val_str_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
  8124. if (i < 0 || i >= (int)model->gguf_kv.size()) {
  8125. if (buf_size > 0) {
  8126. buf[0] = '\0';
  8127. }
  8128. return -1;
  8129. }
  8130. auto it = model->gguf_kv.begin();
  8131. std::advance(it, i);
  8132. return snprintf(buf, buf_size, "%s", it->second.c_str());
  8133. }
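// Illustrative usage sketch (not part of the original source): enumerating the GGUF metadata
// through the three accessors above. Buffer sizes are arbitrary; longer values are truncated by
// snprintf.
//
//     char key[256], val[256];
//     const int n_kv = llama_model_meta_count(model);
//     for (int i = 0; i < n_kv; ++i) {
//         llama_model_meta_key_by_index(model, i, key, sizeof(key));
//         llama_model_meta_val_str_by_index(model, i, val, sizeof(val));
//         printf("%s = %s\n", key, val);
//     }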
int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
    return snprintf(buf, buf_size, "%s %s %s",
            llama_model_arch_name(model->arch).c_str(),
            llama_model_type_name(model->type),
            llama_model_ftype_name(model->ftype).c_str());
}

uint64_t llama_model_size(const struct llama_model * model) {
    uint64_t size = 0;
    for (const auto & it : model->tensors_by_name) {
        size += ggml_nbytes(it.second);
    }
    return size;
}

uint64_t llama_model_n_params(const struct llama_model * model) {
    uint64_t nparams = 0;
    for (const auto & it : model->tensors_by_name) {
        nparams += ggml_nelements(it.second);
    }
    return nparams;
}

struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
    return ggml_get_tensor(model->ctx, name);
}
int llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        const llama_model_quantize_params * params) {
    try {
        llama_model_quantize_internal(fname_inp, fname_out, params);
        return 0;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
        return 1;
    }
}

int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, float scale, const char * path_base_model, int n_threads) {
    try {
        return llama_apply_lora_from_file_internal(ctx->model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, float scale, const char * path_base_model, int n_threads) {
    try {
        return llama_apply_lora_from_file_internal(*model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}
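
// Illustrative usage sketch (caller-side): applying a LoRA adapter to an already loaded
// model. The adapter path is a placeholder; path_base_model may be nullptr to use the
// currently loaded model as the base for the modified layers.
//
//     const int err = llama_model_apply_lora_from_file(model,
//             "lora-adapter.bin", // hypothetical adapter path
//             1.0f,               // scale
//             nullptr,            // optional base model path
//             4);                 // n_threads
//     if (err != 0) {
//         fprintf(stderr, "failed to apply LoRA adapter\n");
//     }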
struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_max_seq) {
    struct llama_kv_cache_view result = {
        /*.n_cells            = */ 0,
        /*.n_max_seq          = */ n_max_seq,
        /*.token_count        = */ 0,
        /*.used_cells         = */ llama_get_kv_cache_used_cells(ctx),
        /*.max_contiguous     = */ 0,
        /*.max_contiguous_idx = */ -1,
        /*.cells              = */ nullptr,
        /*.cells_sequences    = */ nullptr,
    };
    return result;
}

void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
    if (view->cells != nullptr) {
        free(view->cells);
        view->cells = nullptr;
    }
    if (view->cells_sequences != nullptr) {
        free(view->cells_sequences);
        view->cells_sequences = nullptr;
    }
}

void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
    if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
        view->n_cells = int32_t(ctx->kv_self.size);
        void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
        view->cells = (struct llama_kv_cache_view_cell *)p;
        p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_max_seq * view->n_cells);
        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
        view->cells_sequences = (llama_seq_id *)p;
    }

    const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
    llama_kv_cache_view_cell * c_curr = view->cells;
    llama_seq_id * cs_curr = view->cells_sequences;
    int32_t used_cells = 0;
    int32_t token_count = 0;
    int32_t curr_contig_idx = -1;
    uint32_t max_contig = 0;
    int32_t max_contig_idx = -1;

    for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_max_seq) {
        const size_t curr_size = kv_cells[i].seq_id.size();
        token_count += curr_size;
        c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;

        if (curr_size > 0) {
            if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
                max_contig = i - curr_contig_idx;
                max_contig_idx = curr_contig_idx;
            }
            curr_contig_idx = -1;
        } else if (curr_contig_idx < 0) {
            curr_contig_idx = i;
        }

        int seq_idx = 0;
        for (const llama_seq_id it : kv_cells[i].seq_id) {
            if (seq_idx >= view->n_max_seq) {
                break;
            }
            cs_curr[seq_idx] = it;
            seq_idx++;
        }
        if (seq_idx != 0) {
            used_cells++;
        }
        for (; seq_idx < view->n_max_seq; seq_idx++) {
            cs_curr[seq_idx] = -1;
        }
    }
    if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
        max_contig_idx = curr_contig_idx;
        max_contig = kv_cells.size() - curr_contig_idx;
    }
    view->max_contiguous = max_contig;
    view->max_contiguous_idx = max_contig_idx;
    view->token_count = token_count;
    view->used_cells = used_cells;
    if (uint32_t(used_cells) != ctx->kv_self.used) {
        LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
            __func__, ctx->kv_self.used, used_cells);
    }
}
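
// Illustrative usage sketch (caller-side): inspecting KV cache occupancy with the view
// API above. Assumes `ctx` is a valid llama_context; 1 is the maximum number of
// sequences per cell that the caller cares about.
//
//     struct llama_kv_cache_view view = llama_kv_cache_view_init(ctx, 1);
//     llama_kv_cache_view_update(ctx, &view);
//     printf("kv cache: %d/%d cells used, %d tokens, longest free run %d\n",
//             view.used_cells, view.n_cells, view.token_count, view.max_contiguous);
//     llama_kv_cache_view_free(&view);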
int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
    int result = 0;
    for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
        result += ctx->kv_self.cells[i].seq_id.size();
    }
    return result;
}

int llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
    return ctx->kv_self.used;
}

void llama_kv_cache_clear(struct llama_context * ctx) {
    llama_kv_cache_clear(ctx->kv_self);
}

void llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
}

void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }
    llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
}

void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
    llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
}

void llama_kv_cache_seq_shift(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
    llama_kv_cache_seq_shift(ctx->kv_self, seq_id, p0, p1, delta);
}
// Returns the *maximum* size of the state
size_t llama_get_state_size(const struct llama_context * ctx) {
    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size        = sizeof(size_t);
    const size_t s_rng             = LLAMA_MAX_RNG_STATE;
    const size_t s_logits_capacity = sizeof(size_t);
    const size_t s_logits_size     = sizeof(size_t);
    const size_t s_logits          = ctx->logits.capacity() * sizeof(float);
    const size_t s_embedding_size  = sizeof(size_t);
    const size_t s_embedding       = ctx->embedding.size() * sizeof(float);
    const size_t s_kv_size         = sizeof(size_t);
    const size_t s_kv_ntok         = sizeof(int);
    const size_t s_kv              = ggml_backend_buffer_get_size(ctx->kv_self.buf);

    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_logits_capacity
        + s_logits_size
        + s_logits
        + s_embedding_size
        + s_embedding
        + s_kv_size
        + s_kv_ntok
        + s_kv
    );

    return s_total;
}
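
// Illustrative usage sketch (caller-side): snapshotting and restoring the context state
// in memory, using llama_get_state_size above as an upper bound together with
// llama_copy_state_data / llama_set_state_data defined below. Assumes `ctx` is a valid
// llama_context.
//
//     std::vector<uint8_t> state(llama_get_state_size(ctx)); // maximum size, actual may be smaller
//     const size_t n_written = llama_copy_state_data(ctx, state.data());
//     // ... evaluate more tokens, then roll back to the snapshot:
//     const size_t n_read = llama_set_state_data(ctx, state.data());
//     // both n_written and n_read are <= state.size()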
// llama_context_data
struct llama_data_context {
    virtual void write(const void * src, size_t size) = 0;
    virtual size_t get_size_written() = 0;
    virtual ~llama_data_context() = default;
};

struct llama_data_buffer_context : llama_data_context {
    uint8_t * ptr;
    size_t size_written = 0;

    llama_data_buffer_context(uint8_t * p) : ptr(p) {}

    void write(const void * src, size_t size) override {
        memcpy(ptr, src, size);
        ptr += size;
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

struct llama_data_file_context : llama_data_context {
    llama_file * file;
    size_t size_written = 0;

    llama_data_file_context(llama_file * f) : file(f) {}

    void write(const void * src, size_t size) override {
        file->write_raw(src, size);
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};
/** copy state data into either a buffer or file depending on the passed in context
 *
 * file context:
 * llama_file file("/path", "wb");
 * llama_data_file_context data_ctx(&file);
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
 * buffer context:
 * std::vector<uint8_t> buf(max_size, 0);
 * llama_data_buffer_context data_ctx(buf.data());
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
*/
static void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
    // copy rng
    {
        std::stringstream rng_ss;
        rng_ss << ctx->rng;

        const size_t rng_size = rng_ss.str().size();
        char rng_buf[LLAMA_MAX_RNG_STATE];

        memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
        memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());

        data_ctx->write(&rng_size,   sizeof(rng_size));
        data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE);
    }

    // copy logits
    {
        const size_t logits_cap  = ctx->logits.capacity();
        const size_t logits_size = ctx->logits.size();

        data_ctx->write(&logits_cap,  sizeof(logits_cap));
        data_ctx->write(&logits_size, sizeof(logits_size));

        if (logits_size) {
            data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
        }

        // If there is a gap between the size and the capacity, write padding
        size_t padding_size = (logits_cap - logits_size) * sizeof(float);
        if (padding_size > 0) {
            std::vector<uint8_t> padding(padding_size, 0); // Create a buffer filled with zeros
            data_ctx->write(padding.data(), padding_size);
        }
    }

    // copy embeddings
    {
        const size_t embedding_size = ctx->embedding.size();

        data_ctx->write(&embedding_size, sizeof(embedding_size));

        if (embedding_size) {
            data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
        }
    }

    // copy kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const auto & cparams = ctx->cparams;

        const auto n_layer = hparams.n_layer;
        const auto n_embd  = hparams.n_embd_gqa();
        const auto n_ctx   = cparams.n_ctx;

        const size_t   kv_buf_size = ggml_backend_buffer_get_size(kv_self.buf);
        const uint32_t kv_head     = kv_self.head;
        const uint32_t kv_size     = kv_self.size;
        const uint32_t kv_used     = kv_self.used;

        data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
        data_ctx->write(&kv_head,     sizeof(kv_head));
        data_ctx->write(&kv_size,     sizeof(kv_size));
        data_ctx->write(&kv_used,     sizeof(kv_used));

        if (kv_buf_size) {
            const size_t elt_size = ggml_element_size(kv_self.k_l[0]);

            ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
            ggml_cgraph * gf = ggml_new_graph(cpy_ctx);

            std::vector<struct ggml_tensor *> kout2d(n_layer);
            std::vector<struct ggml_tensor *> vout2d(n_layer);

            for (int il = 0; il < (int) n_layer; ++il) {
                kout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd, kv_head);
                vout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd);

                ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il],
                        n_embd, kv_head,
                        elt_size*n_embd, 0);

                ggml_tensor * v2d = ggml_view_2d(cpy_ctx, kv_self.v_l[il],
                        kv_head, n_embd,
                        elt_size*n_ctx, 0);

                ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k2d, kout2d[il]));
                ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v2d, vout2d[il]));
            }

            ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend);

            ggml_backend_graph_compute(ctx->backend, gf);

            std::vector<uint8_t> tmp_buf;
            for (int il = 0; il < (int) n_layer; ++il) {
                tmp_buf.resize(ggml_nbytes(kout2d[il]));
                ggml_backend_tensor_get(kout2d[il], tmp_buf.data(), 0, tmp_buf.size());
                data_ctx->write(tmp_buf.data(), tmp_buf.size());

                tmp_buf.resize(ggml_nbytes(vout2d[il]));
                ggml_backend_tensor_get(vout2d[il], tmp_buf.data(), 0, tmp_buf.size());
                data_ctx->write(tmp_buf.data(), tmp_buf.size());
            }

            ggml_free(cpy_ctx);

            ggml_backend_buffer_free(buf);
        }

        for (uint32_t i = 0; i < kv_size; ++i) {
            const auto & cell = kv_self.cells[i];

            const llama_pos pos         = cell.pos;
            const size_t    seq_id_size = cell.seq_id.size();

            data_ctx->write(&pos,         sizeof(pos));
            data_ctx->write(&seq_id_size, sizeof(seq_id_size));

            for (auto seq_id : cell.seq_id) {
                data_ctx->write(&seq_id, sizeof(seq_id));
            }
        }
    }
}

size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
    llama_data_buffer_context data_ctx(dst);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return data_ctx.get_size_written();
}
// Sets the state reading from the specified source address
size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
    uint8_t * inp = src;

    // set rng
    {
        size_t rng_size;
        char   rng_buf[LLAMA_MAX_RNG_STATE];

        memcpy(&rng_size,   inp, sizeof(rng_size));    inp += sizeof(rng_size);
        memcpy(&rng_buf[0], inp, LLAMA_MAX_RNG_STATE); inp += LLAMA_MAX_RNG_STATE;

        std::stringstream rng_ss;
        rng_ss.str(std::string(&rng_buf[0], rng_size));
        rng_ss >> ctx->rng;

        GGML_ASSERT(!rng_ss.fail());
    }

    // set logits
    {
        size_t logits_cap;
        size_t logits_size;

        memcpy(&logits_cap,  inp, sizeof(logits_cap));  inp += sizeof(logits_cap);
        memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);

        GGML_ASSERT(ctx->logits.capacity() == logits_cap);

        if (logits_size) {
            ctx->logits.resize(logits_size);
            memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
        }

        inp += logits_cap * sizeof(float);
    }

    // set embeddings
    {
        size_t embedding_size;

        memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);

        GGML_ASSERT(ctx->embedding.capacity() == embedding_size);

        if (embedding_size) {
            memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
            inp += embedding_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const auto & cparams = ctx->cparams;

        const int n_layer = hparams.n_layer;
        const int n_embd  = hparams.n_embd_gqa();
        const int n_ctx   = cparams.n_ctx;

        size_t   kv_buf_size;
        uint32_t kv_head;
        uint32_t kv_size;
        uint32_t kv_used;

        memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
        memcpy(&kv_head,     inp, sizeof(kv_head));     inp += sizeof(kv_head);
        memcpy(&kv_size,     inp, sizeof(kv_size));     inp += sizeof(kv_size);
        memcpy(&kv_used,     inp, sizeof(kv_used));     inp += sizeof(kv_used);

        if (kv_buf_size) {
            GGML_ASSERT(ggml_backend_buffer_get_size(kv_self.buf) == kv_buf_size);

            const size_t elt_size = ggml_element_size(kv_self.k_l[0]);

            ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
            ggml_cgraph * gf = ggml_new_graph(cpy_ctx);

            std::vector<struct ggml_tensor *> kin2d(n_layer);
            std::vector<struct ggml_tensor *> vin2d(n_layer);

            for (int il = 0; il < n_layer; ++il) {
                kin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd, kv_head);
                vin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd);

                ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il],
                        n_embd, kv_head,
                        elt_size*n_embd, 0);

                ggml_tensor * v2d = ggml_view_2d(cpy_ctx, kv_self.v_l[il],
                        kv_head, n_embd,
                        elt_size*n_ctx, 0);

                ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin2d[il], k2d));
                ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin2d[il], v2d));
            }

            ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend);

            // load data into the tensors
            for (int il = 0; il < n_layer; ++il) {
                ggml_backend_tensor_set(kin2d[il], inp, 0, ggml_nbytes(kin2d[il]));
                inp += ggml_nbytes(kin2d[il]);

                ggml_backend_tensor_set(vin2d[il], inp, 0, ggml_nbytes(vin2d[il]));
                inp += ggml_nbytes(vin2d[il]);
            }

            ggml_backend_graph_compute(ctx->backend, gf);

            ggml_free(cpy_ctx);

            ggml_backend_buffer_free(buf);
        }

        ctx->kv_self.head = kv_head;
        ctx->kv_self.size = kv_size;
        ctx->kv_self.used = kv_used;

        ctx->kv_self.cells.resize(kv_size);

        for (uint32_t i = 0; i < kv_size; ++i) {
            llama_pos pos;
            size_t    seq_id_size;

            memcpy(&pos,         inp, sizeof(pos));         inp += sizeof(pos);
            memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);

            ctx->kv_self.cells[i].pos = pos;

            llama_seq_id seq_id;

            for (size_t j = 0; j < seq_id_size; ++j) {
                memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
                ctx->kv_self.cells[i].seq_id.insert(seq_id);
            }
        }
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_get_state_size(ctx);

    GGML_ASSERT(nread <= max_size);

    return nread;
}
static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_get_state_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_set_state_data(ctx, state_data.data());
    }

    return true;
}

bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
        return false;
    }
}

bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return true;
}
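
// Illustrative usage sketch (caller-side): persisting an evaluated prompt to disk and
// restoring it later so the prompt does not have to be re-evaluated. "prompt.bin" is a
// placeholder path; `tokens` is the tokenized prompt that was evaluated in `ctx`.
//
//     // save
//     llama_save_session_file(ctx, "prompt.bin", tokens.data(), tokens.size());
//
//     // restore (into a freshly created context for the same model)
//     std::vector<llama_token> restored(llama_n_ctx(ctx));
//     size_t n_restored = 0;
//     if (llama_load_session_file(ctx, "prompt.bin", restored.data(), restored.size(), &n_restored)) {
//         restored.resize(n_restored);
//     }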
int llama_eval(
        struct llama_context * ctx,
        llama_token * tokens,
        int32_t n_tokens,
        int n_past) {
    llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);

    const int ret = llama_decode_internal(*ctx, llama_batch_get_one(tokens, n_tokens, n_past, 0));
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

int llama_eval_embd(
        struct llama_context * ctx,
        float * embd,
        int32_t n_tokens,
        int n_past) {
    llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);

    llama_batch batch = { n_tokens, nullptr, embd, nullptr, nullptr, nullptr, nullptr, n_past, 1, 0, };

    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch) {
    ctx->cparams.n_threads       = n_threads;
    ctx->cparams.n_threads_batch = n_threads_batch;
}

struct llama_batch llama_batch_get_one(
        llama_token * tokens,
        int32_t n_tokens,
        llama_pos pos_0,
        llama_seq_id seq_id) {
    return {
        /*n_tokens   =*/ n_tokens,
        /*tokens     =*/ tokens,
        /*embd       =*/ nullptr,
        /*pos        =*/ nullptr,
        /*n_seq_id   =*/ nullptr,
        /*seq_id     =*/ nullptr,
        /*logits     =*/ nullptr,
        /*all_pos_0  =*/ pos_0,
        /*all_pos_1  =*/ 1,
        /*all_seq_id =*/ seq_id,
    };
}

struct llama_batch llama_batch_init(int32_t n_tokens, int32_t embd, int32_t n_seq_max) {
    llama_batch batch = { 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, 0, 0, };

    if (embd) {
        batch.embd = (float *) malloc(sizeof(float) * n_tokens * embd);
    } else {
        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens);
    }

    batch.pos      = (llama_pos *)     malloc(sizeof(llama_pos)      * n_tokens);
    batch.n_seq_id = (int32_t *)       malloc(sizeof(int32_t)        * n_tokens);
    batch.seq_id   = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * n_tokens);
    for (int i = 0; i < n_tokens; ++i) {
        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
    }
    batch.logits   = (int8_t *)        malloc(sizeof(int8_t)         * n_tokens);

    return batch;
}
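
// Illustrative usage sketch (caller-side): filling a batch allocated by llama_batch_init
// above and decoding it with llama_decode / llama_get_logits_ith / llama_batch_free
// defined below. Assumes `ctx` is a valid llama_context and `prompt` is a
// std::vector<llama_token>; only the last token requests logits here.
//
//     llama_batch batch = llama_batch_init(prompt.size(), 0, 1);
//     batch.n_tokens = (int32_t) prompt.size();
//     for (int32_t i = 0; i < batch.n_tokens; ++i) {
//         batch.token[i]     = prompt[i];
//         batch.pos[i]       = i;
//         batch.n_seq_id[i]  = 1;
//         batch.seq_id[i][0] = 0;
//         batch.logits[i]    = (i == batch.n_tokens - 1); // request logits for the last token only
//     }
//     if (llama_decode(ctx, batch) != 0) {
//         fprintf(stderr, "llama_decode failed\n");
//     }
//     const float * logits = llama_get_logits_ith(ctx, batch.n_tokens - 1);
//     llama_batch_free(batch);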
void llama_batch_free(struct llama_batch batch) {
    if (batch.token)    free(batch.token);
    if (batch.embd)     free(batch.embd);
    if (batch.pos)      free(batch.pos);
    if (batch.n_seq_id) free(batch.n_seq_id);
    if (batch.seq_id) {
        for (int i = 0; i < batch.n_tokens; ++i) {
            free(batch.seq_id[i]);
        }
        free(batch.seq_id);
    }
    if (batch.logits)   free(batch.logits);
}

int llama_decode(
        struct llama_context * ctx,
        struct llama_batch batch) {
    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

float * llama_get_logits(struct llama_context * ctx) {
    return ctx->logits.data();
}

float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
    assert(ctx->logits_valid.at(i));
    return ctx->logits.data() + i*ctx->model.hparams.n_vocab;
}

float * llama_get_embeddings(struct llama_context * ctx) {
    return ctx->embedding.data();
}
const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].text.c_str();
}

float llama_token_get_score(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].score;
}

llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].type;
}

llama_token llama_token_bos(const struct llama_model * model) {
    return model->vocab.special_bos_id;
}

llama_token llama_token_eos(const struct llama_model * model) {
    return model->vocab.special_eos_id;
}

llama_token llama_token_nl(const struct llama_model * model) {
    return model->vocab.linefeed_id;
}

int llama_add_bos_token(const struct llama_model * model) {
    return model->vocab.special_add_bos;
}

int llama_add_eos_token(const struct llama_model * model) {
    return model->vocab.special_add_eos;
}

llama_token llama_token_prefix(const struct llama_model * model) {
    return model->vocab.special_prefix_id;
}

llama_token llama_token_middle(const struct llama_model * model) {
    return model->vocab.special_middle_id;
}

llama_token llama_token_suffix(const struct llama_model * model) {
    return model->vocab.special_suffix_id;
}

llama_token llama_token_eot(const struct llama_model * model) {
    return model->vocab.special_eot_id;
}

int llama_tokenize(
        const struct llama_model * model,
        const char * text,
        int text_len,
        llama_token * tokens,
        int n_max_tokens,
        bool add_bos,
        bool special) {
    auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_bos, special);

    if (n_max_tokens < (int) res.size()) {
        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}
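
// Illustrative usage sketch (caller-side): when llama_tokenize above returns a negative
// value, its magnitude is the required token count, so the usual pattern is to resize and
// retry. Assumes `model` is a valid llama_model and `text` is a std::string.
//
//     std::vector<llama_token> tokens(text.size() + 1); // rough upper bound
//     int n = llama_tokenize(model, text.c_str(), text.size(),
//                            tokens.data(), tokens.size(), /*add_bos=*/true, /*special=*/false);
//     if (n < 0) {
//         tokens.resize(-n);
//         n = llama_tokenize(model, text.c_str(), text.size(),
//                            tokens.data(), tokens.size(), /*add_bos=*/true, /*special=*/false);
//     }
//     tokens.resize(n);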
static std::string llama_decode_text(const std::string & text) {
    std::string decoded_text;
    auto unicode_sequences = codepoints_from_utf8(text);
    for (auto & unicode_sequence : unicode_sequences) {
        decoded_text += unicode_to_bytes_bpe(codepoint_to_utf8(unicode_sequence));
    }

    return decoded_text;
}

// does not write null-terminator to buf
int llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int length) {
    if (0 <= token && token < llama_n_vocab(model)) {
        switch (llama_vocab_get_type(model->vocab)) {
        case LLAMA_VOCAB_TYPE_SPM: {
            if (llama_is_normal_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                llama_unescape_whitespace(result);
                if (length < (int) result.length()) {
                    return -(int) result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
                if (length < 3) {
                    return -3;
                }
                memcpy(buf, "\xe2\x96\x85", 3);
                return 3;
            } else if (llama_is_control_token(model->vocab, token)) {
                ;
            } else if (llama_is_byte_token(model->vocab, token)) {
                if (length < 1) {
                    return -1;
                }
                buf[0] = llama_token_to_byte(model->vocab, token);
                return 1;
            } else {
                // TODO: for now we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                // GGML_ASSERT(false);
            }
            break;
        }
        case LLAMA_VOCAB_TYPE_BPE: {
            if (llama_is_normal_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                result = llama_decode_text(result);
                if (length < (int) result.length()) {
                    return -(int) result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_control_token(model->vocab, token)) {
                ;
            } else {
                // TODO: for now we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                // GGML_ASSERT(false);
            }
            break;
        }
        default:
            GGML_ASSERT(false);
        }
    }
    return 0;
}
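
// Illustrative usage sketch (caller-side): turning a token id back into text with
// llama_token_to_piece above, using the same "negative return means required size"
// convention as llama_tokenize. Assumes `model` and `token` are valid.
//
//     std::string piece(8, '\0');
//     int n = llama_token_to_piece(model, token, &piece[0], piece.size());
//     if (n < 0) {
//         piece.resize(-n);
//         n = llama_token_to_piece(model, token, &piece[0], piece.size());
//     }
//     piece.resize(n);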
struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}

void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    LLAMA_LOG_INFO("\n");
    LLAMA_LOG_INFO("%s:        load time = %10.2f ms\n", __func__, timings.t_load_ms);
    LLAMA_LOG_INFO("%s:      sample time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    LLAMA_LOG_INFO("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    LLAMA_LOG_INFO("%s:       total time = %10.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";

    return s.c_str();
}
void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
    fprintf(stream, "\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "# Timings #\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "\n");
    fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
            1.0e-3 * ctx->t_eval_us / ctx->n_eval);
    fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
            1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
    fprintf(stream, "mst_sample: %.2f # ms / token during sampling\n",
            1.0e-3 * ctx->t_sample_us / ctx->n_sample);
    fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
    fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
    fprintf(stream, "n_sample: %d # number of sampled tokens\n", ctx->n_sample);
    fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
    fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
    fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
    fprintf(stream, "t_sample_us: %" PRId64 " # total microseconds spent sampling\n", ctx->t_sample_us);
    fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
            1.0e6 * ctx->n_eval / ctx->t_eval_us);
    fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
            1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
    fprintf(stream, "ts_sample: %.2f # tokens / second during sampling\n",
            1.0e6 * ctx->n_sample / ctx->t_sample_us);
}

// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
) {
    return ctx->model.tensors_by_name;
}

void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
#ifdef GGML_USE_METAL
    ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#endif
}
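
// Illustrative usage sketch (caller-side): routing the library's log output through a
// custom callback via llama_log_set above. The callback name is a placeholder; it simply
// forwards every message to the FILE * passed as user_data, mirroring the default
// callback defined below.
//
//     static void my_log_cb(ggml_log_level level, const char * text, void * user_data) {
//         (void) level;
//         FILE * f = (FILE *) user_data; // e.g. a log file opened by the application
//         fputs(text, f);
//         fflush(f);
//     }
//
//     llama_log_set(my_log_cb, stderr);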
static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        char * buffer2 = new char[len+1];
        vsnprintf(buffer2, len+1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}

static void llama_log_internal(ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}

static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}