llama.cpp

#define LLAMA_API_INTERNAL
#include "llama.h"

#include "unicode.h"

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

#ifdef GGML_USE_CUBLAS
#  include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#  include "ggml-opencl.h"
#elif defined(GGML_USE_VULKAN)
#  include "ggml-vulkan.h"
#elif defined(GGML_USE_SYCL)
#  include "ggml-sycl.h"
#elif defined(GGML_USE_KOMPUTE)
#  include "ggml-kompute.h"
#endif

#ifdef GGML_USE_METAL
#  include "ggml-metal.h"
#endif
#ifdef GGML_USE_MPI
#  include "ggml-mpi.h"
#endif

#ifndef QK_K
#  ifdef GGML_QKK_64
#    define QK_K 64
#  else
#    define QK_K 256
#  endif
#endif

#ifdef __has_include
    #if __has_include(<unistd.h>)
        #include <unistd.h>
        #if defined(_POSIX_MAPPED_FILES)
            #include <sys/mman.h>
            #include <fcntl.h>
        #endif
        #if defined(_POSIX_MEMLOCK_RANGE)
            #include <sys/resource.h>
        #endif
    #endif
#endif

#if defined(_WIN32)
    #define WIN32_LEAN_AND_MEAN
    #ifndef NOMINMAX
        #define NOMINMAX
    #endif
    #include <windows.h>
    #include <io.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cfloat>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <forward_list>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <queue>
#include <random>
#include <regex>
#include <set>
#include <sstream>
#include <thread>
#include <type_traits>
#include <unordered_map>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif

#define LLAMA_MAX_NODES   8192
#define LLAMA_MAX_EXPERTS 8

//
// logging
//

LLAMA_ATTRIBUTE_FORMAT(2, 3)
static void llama_log_internal        (ggml_log_level level, const char * format, ...);
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
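// Usage sketch for the macros above (illustrative values, not from this file):
//
//     LLAMA_LOG_INFO("%s: n_ctx = %d\n", __func__, 512);
//     LLAMA_LOG_WARN("%s: falling back to CPU\n", __func__);
//
// LLAMA_ATTRIBUTE_FORMAT(2, 3) lets GCC/Clang type-check the format string
// (argument 2) against the variadic arguments (starting at 3) at compile time.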
//
// helpers
//

static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
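// utf8_len maps a UTF-8 lead byte's high nibble to the sequence length:
// nibbles 0x0-0xB -> 1 byte (ASCII, and continuation bytes fall through as 1),
// 0xC-0xD -> 2 bytes, 0xE -> 3, 0xF -> 4. Illustrative calls (not from this file):
//
//     utf8_len('a');    // 0x61 >> 4 == 0x6 -> 1
//     utf8_len('\xE2'); // 0xE2 >> 4 == 0xE -> 3 (e.g. lead byte of U+2581)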
static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    std::string result;
    for (size_t pos = 0; ; pos += search.length()) {
        auto new_pos = s.find(search, pos);
        if (new_pos == std::string::npos) {
            result += s.substr(pos, s.size() - pos);
            break;
        }
        result += s.substr(pos, new_pos - pos) + replace;
        pos = new_pos;
    }
    s = std::move(result);
}
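// Illustrative call (not from this file), replacing every occurrence of
// `search` in a single left-to-right pass:
//
//     std::string s = "hello";
//     replace_all(s, "l", "L"); // s == "heLLo"
//
// Because the scan advances through `s` rather than the growing `result`,
// replacement text is never re-scanned, so replace_all(s, "a", "aa")
// terminates instead of looping forever.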
static bool is_float_close(float a, float b, float abs_tol) {
    // Check for non-negative tolerance
    if (abs_tol < 0.0) {
        throw std::invalid_argument("Tolerance must be non-negative");
    }

    // Exact equality check
    if (a == b) {
        return true;
    }

    // Check for infinities
    if (std::isinf(a) || std::isinf(b)) {
        return false;
    }

    // Regular comparison using the provided absolute tolerance
    return std::fabs(b - a) <= abs_tol;
}
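// note on the ordering above: two infinities of the same sign compare equal and
// return true via the exact-equality check, while inf vs. a finite value returns
// false via the isinf() guard - without it, fabs(b - a) would be inf and the
// tolerance comparison would be meaningless.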
static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}
LLAMA_ATTRIBUTE_FORMAT(1, 2)
static std::string format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}
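// illustrative example: format("%s = %d", "n_ctx", 4096) -> "n_ctx = 4096".
// The first vsnprintf pass measures the required length and the va_copy'd second
// pass writes into the exactly-sized buffer - this is why two va_lists are needed
// (a va_list cannot be traversed twice).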
//
// gguf constants (sync with gguf.py)
//

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_PERSIMMON,
    LLM_ARCH_REFACT,
    LLM_ARCH_BERT,
    LLM_ARCH_NOMIC_BERT,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_QWEN2,
    LLM_ARCH_PHI2,
    LLM_ARCH_PLAMO,
    LLM_ARCH_CODESHELL,
    LLM_ARCH_ORION,
    LLM_ARCH_INTERNLM2,
    LLM_ARCH_MINICPM,
    LLM_ARCH_UNKNOWN,
};

static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA,      "llama"      },
    { LLM_ARCH_FALCON,     "falcon"     },
    { LLM_ARCH_GPT2,       "gpt2"       },
    { LLM_ARCH_GPTJ,       "gptj"       },
    { LLM_ARCH_GPTNEOX,    "gptneox"    },
    { LLM_ARCH_MPT,        "mpt"        },
    { LLM_ARCH_BAICHUAN,   "baichuan"   },
    { LLM_ARCH_STARCODER,  "starcoder"  },
    { LLM_ARCH_PERSIMMON,  "persimmon"  },
    { LLM_ARCH_REFACT,     "refact"     },
    { LLM_ARCH_BERT,       "bert"       },
    { LLM_ARCH_NOMIC_BERT, "nomic-bert" },
    { LLM_ARCH_BLOOM,      "bloom"      },
    { LLM_ARCH_STABLELM,   "stablelm"   },
    { LLM_ARCH_QWEN,       "qwen"       },
    { LLM_ARCH_QWEN2,      "qwen2"      },
    { LLM_ARCH_PHI2,       "phi2"       },
    { LLM_ARCH_PLAMO,      "plamo"      },
    { LLM_ARCH_CODESHELL,  "codeshell"  },
    { LLM_ARCH_ORION,      "orion"      },
    { LLM_ARCH_INTERNLM2,  "internlm2"  },
    { LLM_ARCH_MINICPM,    "minicpm"    },
};

enum llm_kv {
    LLM_KV_GENERAL_ARCHITECTURE,
    LLM_KV_GENERAL_QUANTIZATION_VERSION,
    LLM_KV_GENERAL_ALIGNMENT,
    LLM_KV_GENERAL_NAME,
    LLM_KV_GENERAL_AUTHOR,
    LLM_KV_GENERAL_URL,
    LLM_KV_GENERAL_DESCRIPTION,
    LLM_KV_GENERAL_LICENSE,
    LLM_KV_GENERAL_SOURCE_URL,
    LLM_KV_GENERAL_SOURCE_HF_REPO,

    LLM_KV_CONTEXT_LENGTH,
    LLM_KV_EMBEDDING_LENGTH,
    LLM_KV_BLOCK_COUNT,
    LLM_KV_FEED_FORWARD_LENGTH,
    LLM_KV_USE_PARALLEL_RESIDUAL,
    LLM_KV_TENSOR_DATA_LAYOUT,
    LLM_KV_EXPERT_COUNT,
    LLM_KV_EXPERT_USED_COUNT,
    LLM_KV_POOLING_TYPE,

    LLM_KV_ATTENTION_HEAD_COUNT,
    LLM_KV_ATTENTION_HEAD_COUNT_KV,
    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
    LLM_KV_ATTENTION_CLAMP_KQV,
    LLM_KV_ATTENTION_KEY_LENGTH,
    LLM_KV_ATTENTION_VALUE_LENGTH,
    LLM_KV_ATTENTION_LAYERNORM_EPS,
    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
    LLM_KV_ATTENTION_CAUSAL,

    LLM_KV_ROPE_DIMENSION_COUNT,
    LLM_KV_ROPE_FREQ_BASE,
    LLM_KV_ROPE_SCALE_LINEAR,
    LLM_KV_ROPE_SCALING_TYPE,
    LLM_KV_ROPE_SCALING_FACTOR,
    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
    LLM_KV_ROPE_SCALING_FINETUNED,

    LLM_KV_TOKENIZER_MODEL,
    LLM_KV_TOKENIZER_LIST,
    LLM_KV_TOKENIZER_TOKEN_TYPE,
    LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
    LLM_KV_TOKENIZER_SCORES,
    LLM_KV_TOKENIZER_MERGES,
    LLM_KV_TOKENIZER_BOS_ID,
    LLM_KV_TOKENIZER_EOS_ID,
    LLM_KV_TOKENIZER_UNK_ID,
    LLM_KV_TOKENIZER_SEP_ID,
    LLM_KV_TOKENIZER_PAD_ID,
    LLM_KV_TOKENIZER_ADD_BOS,
    LLM_KV_TOKENIZER_ADD_EOS,
    LLM_KV_TOKENIZER_ADD_PREFIX,
    LLM_KV_TOKENIZER_HF_JSON,
    LLM_KV_TOKENIZER_RWKV,
};

static std::map<llm_kv, const char *> LLM_KV_NAMES = {
    { LLM_KV_GENERAL_ARCHITECTURE,          "general.architecture"                  },
    { LLM_KV_GENERAL_QUANTIZATION_VERSION,  "general.quantization_version"          },
    { LLM_KV_GENERAL_ALIGNMENT,             "general.alignment"                     },
    { LLM_KV_GENERAL_NAME,                  "general.name"                          },
    { LLM_KV_GENERAL_AUTHOR,                "general.author"                        },
    { LLM_KV_GENERAL_URL,                   "general.url"                           },
    { LLM_KV_GENERAL_DESCRIPTION,           "general.description"                   },
    { LLM_KV_GENERAL_LICENSE,               "general.license"                       },
    { LLM_KV_GENERAL_SOURCE_URL,            "general.source.url"                    },
    { LLM_KV_GENERAL_SOURCE_HF_REPO,        "general.source.huggingface.repository" },

    { LLM_KV_CONTEXT_LENGTH,                "%s.context_length"        },
    { LLM_KV_EMBEDDING_LENGTH,              "%s.embedding_length"      },
    { LLM_KV_BLOCK_COUNT,                   "%s.block_count"           },
    { LLM_KV_FEED_FORWARD_LENGTH,           "%s.feed_forward_length"   },
    { LLM_KV_USE_PARALLEL_RESIDUAL,         "%s.use_parallel_residual" },
    { LLM_KV_TENSOR_DATA_LAYOUT,            "%s.tensor_data_layout"    },
    { LLM_KV_EXPERT_COUNT,                  "%s.expert_count"          },
    { LLM_KV_EXPERT_USED_COUNT,             "%s.expert_used_count"     },
    { LLM_KV_POOLING_TYPE,                  "%s.pooling_type"          },

    { LLM_KV_ATTENTION_HEAD_COUNT,          "%s.attention.head_count"             },
    { LLM_KV_ATTENTION_HEAD_COUNT_KV,       "%s.attention.head_count_kv"          },
    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS,      "%s.attention.max_alibi_bias"         },
    { LLM_KV_ATTENTION_CLAMP_KQV,           "%s.attention.clamp_kqv"              },
    { LLM_KV_ATTENTION_KEY_LENGTH,          "%s.attention.key_length"             },
    { LLM_KV_ATTENTION_VALUE_LENGTH,        "%s.attention.value_length"           },
    { LLM_KV_ATTENTION_LAYERNORM_EPS,       "%s.attention.layer_norm_epsilon"     },
    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,   "%s.attention.layer_norm_rms_epsilon" },
    { LLM_KV_ATTENTION_CAUSAL,              "%s.attention.causal"                 },

    { LLM_KV_ROPE_DIMENSION_COUNT,          "%s.rope.dimension_count"                 },
    { LLM_KV_ROPE_FREQ_BASE,                "%s.rope.freq_base"                       },
    { LLM_KV_ROPE_SCALE_LINEAR,             "%s.rope.scale_linear"                    },
    { LLM_KV_ROPE_SCALING_TYPE,             "%s.rope.scaling.type"                    },
    { LLM_KV_ROPE_SCALING_FACTOR,           "%s.rope.scaling.factor"                  },
    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,     "%s.rope.scaling.original_context_length" },
    { LLM_KV_ROPE_SCALING_FINETUNED,        "%s.rope.scaling.finetuned"               },

    { LLM_KV_TOKENIZER_MODEL,               "tokenizer.ggml.model"              },
    { LLM_KV_TOKENIZER_LIST,                "tokenizer.ggml.tokens"             },
    { LLM_KV_TOKENIZER_TOKEN_TYPE,          "tokenizer.ggml.token_type"         },
    { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,    "tokenizer.ggml.token_type_count"   },
    { LLM_KV_TOKENIZER_SCORES,              "tokenizer.ggml.scores"             },
    { LLM_KV_TOKENIZER_MERGES,              "tokenizer.ggml.merges"             },
    { LLM_KV_TOKENIZER_BOS_ID,              "tokenizer.ggml.bos_token_id"       },
    { LLM_KV_TOKENIZER_EOS_ID,              "tokenizer.ggml.eos_token_id"       },
    { LLM_KV_TOKENIZER_UNK_ID,              "tokenizer.ggml.unknown_token_id"   },
    { LLM_KV_TOKENIZER_SEP_ID,              "tokenizer.ggml.seperator_token_id" },
    { LLM_KV_TOKENIZER_PAD_ID,              "tokenizer.ggml.padding_token_id"   },
    { LLM_KV_TOKENIZER_ADD_BOS,             "tokenizer.ggml.add_bos_token"      },
    { LLM_KV_TOKENIZER_ADD_EOS,             "tokenizer.ggml.add_eos_token"      },
    { LLM_KV_TOKENIZER_ADD_PREFIX,          "tokenizer.ggml.add_space_prefix"   },
    { LLM_KV_TOKENIZER_HF_JSON,             "tokenizer.huggingface.json"        },
    { LLM_KV_TOKENIZER_RWKV,                "tokenizer.rwkv.world"              },
};

struct LLM_KV {
    LLM_KV(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    std::string operator()(llm_kv kv) const {
        return ::format(LLM_KV_NAMES[kv], LLM_ARCH_NAMES[arch]);
    }
};
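// illustrative example: LLM_KV(LLM_ARCH_LLAMA)(LLM_KV_CONTEXT_LENGTH) expands the
// "%s" in "%s.context_length" with the arch name, yielding "llama.context_length";
// keys without "%s" (e.g. "general.name") pass through unchanged since vsnprintf
// ignores extra arguments.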
enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_TOKEN_EMBD_NORM,
    LLM_TENSOR_TOKEN_TYPES,
    LLM_TENSOR_POS_EMBD,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_ROPE_FREQS,
    LLM_TENSOR_ATTN_Q,
    LLM_TENSOR_ATTN_K,
    LLM_TENSOR_ATTN_V,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_NORM_2,
    LLM_TENSOR_ATTN_OUT_NORM,
    LLM_TENSOR_ATTN_ROT_EMBD,
    LLM_TENSOR_FFN_GATE_INP,
    LLM_TENSOR_FFN_NORM,
    LLM_TENSOR_FFN_GATE,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
    LLM_TENSOR_FFN_ACT,
    LLM_TENSOR_FFN_DOWN_EXP,
    LLM_TENSOR_FFN_GATE_EXP,
    LLM_TENSOR_FFN_UP_EXP,
    LLM_TENSOR_ATTN_Q_NORM,
    LLM_TENSOR_ATTN_K_NORM,
    LLM_TENSOR_LAYER_OUT_NORM,
};

static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
    {
        LLM_ARCH_LLAMA,
        {
            { LLM_TENSOR_TOKEN_EMBD,    "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,   "output_norm" },
            { LLM_TENSOR_OUTPUT,        "output" },
            { LLM_TENSOR_ROPE_FREQS,    "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,     "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,        "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,        "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,        "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,      "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP,  "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM,      "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,      "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,      "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,        "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXP,  "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP,  "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP,    "blk.%d.ffn_up.%d" },
        },
    },
    {
        LLM_ARCH_BAICHUAN,
        {
            { LLM_TENSOR_TOKEN_EMBD,    "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,   "output_norm" },
            { LLM_TENSOR_OUTPUT,        "output" },
            { LLM_TENSOR_ROPE_FREQS,    "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,     "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,        "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,        "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,        "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,      "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM,      "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,      "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,      "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,        "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_FALCON,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
            { LLM_TENSOR_ATTN_QKV,    "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_GPT2,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_POS_EMBD,    "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV,    "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_GPTJ,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
    {
        LLM_ARCH_GPTNEOX,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV,    "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PERSIMMON,
        {
            { LLM_TENSOR_TOKEN_EMBD,    "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,   "output_norm" },
            { LLM_TENSOR_OUTPUT,        "output" },
            { LLM_TENSOR_ATTN_NORM,     "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV,      "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,      "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_Q_NORM,   "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM,   "blk.%d.attn_k_norm" },
            { LLM_TENSOR_FFN_NORM,      "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN,      "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,        "blk.%d.ffn_up" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
        },
    },
    {
        LLM_ARCH_MPT,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_ATTN_QKV,    "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_ACT,     "blk.%d.ffn.act" },
        },
    },
    {
        LLM_ARCH_STARCODER,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_POS_EMBD,    "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV,    "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_REFACT,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,      "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,      "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,    "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_BERT,
        {
            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
            { LLM_TENSOR_POS_EMBD,        "position_embd" },
            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_NOMIC_BERT,
        {
            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_BLOOM,
        {
            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
            { LLM_TENSOR_OUTPUT,          "output" },
            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_STABLELM,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ROPE_FREQS,  "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,      "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,      "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,    "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_QWEN,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ROPE_FREQS,  "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV,    "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,    "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_QWEN2,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,      "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,      "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,    "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PHI2,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV,    "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,      "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,      "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PLAMO,
        {
            { LLM_TENSOR_TOKEN_EMBD,    "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,   "output_norm" },
            { LLM_TENSOR_OUTPUT,        "output" },
            { LLM_TENSOR_ROPE_FREQS,    "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,     "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,        "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,        "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,        "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,      "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE,      "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,      "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,        "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_CODESHELL,
        {
            { LLM_TENSOR_TOKEN_EMBD,    "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,   "output_norm" },
            { LLM_TENSOR_OUTPUT,        "output" },
            { LLM_TENSOR_ROPE_FREQS,    "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,     "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,        "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,        "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,        "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_QKV,      "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT,      "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM,      "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,      "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,      "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,        "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_ORION,
        {
            { LLM_TENSOR_TOKEN_EMBD,    "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,   "output_norm" },
            { LLM_TENSOR_OUTPUT,        "output" },
            { LLM_TENSOR_ROPE_FREQS,    "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,     "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,        "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,        "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,        "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,      "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM,      "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,      "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,      "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,        "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_INTERNLM2,
        {
            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT,      "output" },
            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,      "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,      "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,    "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_MINICPM,
        {
            { LLM_TENSOR_TOKEN_EMBD,    "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM,   "output_norm" },
            { LLM_TENSOR_OUTPUT,        "output" },
            { LLM_TENSOR_ROPE_FREQS,    "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM,     "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q,        "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K,        "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V,        "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT,      "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP,  "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM,      "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE,      "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN,      "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP,        "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXP,  "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP,  "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP,    "blk.%d.ffn_up.%d" },
        },
    },
    {
        LLM_ARCH_UNKNOWN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
};
static llm_arch llm_arch_from_string(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
        if (kv.second == name) {
            return kv.first;
        }
    }

    return LLM_ARCH_UNKNOWN;
}
// helper to handle gguf constants
// usage:
//
//   const auto tn = LLM_TN(LLM_ARCH_LLAMA);
//
//   std::string name = tn(LLM_TENSOR_OUTPUT);                     // -> "output"
//   std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias");         // -> "token_embd.bias"
//   std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3);     // -> "blk.3.attn_norm.weight"
//
struct LLM_TN {
    LLM_TN(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    std::string operator()(llm_tensor tensor) const {
        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
            return "__missing__";
        }
        return LLM_TENSOR_NAMES[arch].at(tensor);
    }

    std::string operator()(llm_tensor tensor, const std::string & suffix) const {
        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
            return "__missing__";
        }
        return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
    }

    std::string operator()(llm_tensor tensor, int bid) const {
        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
            return "__missing__";
        }
        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
    }

    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
            return "__missing__";
        }
        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
    }

    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
            return "__missing__";
        }
        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid, xid) + "." + suffix;
    }
};
//
// gguf helpers
//

static std::map<int32_t, const char *> LLAMA_ROPE_SCALING_TYPES = {
    { LLAMA_ROPE_SCALING_NONE,   "none"   },
    { LLAMA_ROPE_SCALING_LINEAR, "linear" },
    { LLAMA_ROPE_SCALING_YARN,   "yarn"   },
};

static int32_t llama_rope_scaling_type_from_string(const std::string & name) {
    for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
        if (kv.second == name) {
            return kv.first;
        }
    }

    return LLAMA_ROPE_SCALING_UNSPECIFIED;
}

static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
    switch (type) {
        case GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
        case GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
        case GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
        case GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
        case GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
        case GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
        case GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
        case GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
        case GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
        case GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
        case GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
        default:                return format("unknown type %d", type);
    }
}
static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
    const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);

    switch (type) {
        case GGUF_TYPE_STRING:
            return gguf_get_val_str(ctx_gguf, i);
        case GGUF_TYPE_ARRAY:
            {
                const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
                int arr_n = gguf_get_arr_n(ctx_gguf, i);
                const void * data = gguf_get_arr_data(ctx_gguf, i);
                std::stringstream ss;
                ss << "[";
                for (int j = 0; j < arr_n; j++) {
                    if (arr_type == GGUF_TYPE_STRING) {
                        std::string val = gguf_get_arr_str(ctx_gguf, i, j);
                        // escape quotes
                        replace_all(val, "\\", "\\\\");
                        replace_all(val, "\"", "\\\"");
                        ss << '"' << val << '"';
                    } else if (arr_type == GGUF_TYPE_ARRAY) {
                        ss << "???";
                    } else {
                        ss << gguf_data_to_str(arr_type, data, j);
                    }
                    if (j < arr_n - 1) {
                        ss << ", ";
                    }
                }
                ss << "]";
                return ss.str();
            }
        default:
            return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
    }
}
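// illustrative example: a string-array KV renders as ["a", "b"] with embedded
// quotes and backslashes escaped (the escaping order matters - backslashes first,
// or the added escape characters would themselves be escaped); nested arrays are
// not recursed into and print as "???".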
//
// ggml helpers
//

static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
    struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);

    if (plan.work_size > 0) {
        buf.resize(plan.work_size);
        plan.work_data = buf.data();
    }

    ggml_graph_compute(graph, &plan);
}
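// note: the caller owns the work buffer and passes it by reference, so once it has
// grown to the high-water mark of plan.work_size it is reused across evaluations
// instead of being reallocated for every graph.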
//
// llama helpers
//

#if defined(_WIN32)
static std::string llama_format_win_err(DWORD err) {
    LPSTR buf;
    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
    if (!size) {
        return "FormatMessageA failed";
    }
    std::string ret(buf, size);
    LocalFree(buf);
    return ret;
}
#endif

template <typename T>
struct no_init {
    T value;
    no_init() { /* do nothing */ }
};
struct llama_file {
    // use FILE * so we don't have to re-open the file to mmap
    FILE * fp;
    size_t size;

    llama_file(const char * fname, const char * mode) {
        fp = std::fopen(fname, mode);
        if (fp == NULL) {
            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
        }
        seek(0, SEEK_END);
        size = tell();
        seek(0, SEEK_SET);
    }

    size_t tell() const {
#ifdef _WIN32
        __int64 ret = _ftelli64(fp);
#else
        long ret = std::ftell(fp);
#endif
        GGML_ASSERT(ret != -1); // this really shouldn't fail
        return (size_t) ret;
    }

    void seek(size_t offset, int whence) const {
#ifdef _WIN32
        int ret = _fseeki64(fp, (__int64) offset, whence);
#else
        int ret = std::fseek(fp, (long) offset, whence);
#endif
        GGML_ASSERT(ret == 0); // same
    }

    void read_raw(void * ptr, size_t len) const {
        if (len == 0) {
            return;
        }
        errno = 0;
        std::size_t ret = std::fread(ptr, len, 1, fp);
        if (ferror(fp)) {
            throw std::runtime_error(format("read error: %s", strerror(errno)));
        }
        if (ret != 1) {
            throw std::runtime_error("unexpectedly reached end of file");
        }
    }

    uint32_t read_u32() const {
        uint32_t ret;
        read_raw(&ret, sizeof(ret));
        return ret;
    }

    void write_raw(const void * ptr, size_t len) const {
        if (len == 0) {
            return;
        }
        errno = 0;
        size_t ret = std::fwrite(ptr, len, 1, fp);
        if (ret != 1) {
            throw std::runtime_error(format("write error: %s", strerror(errno)));
        }
    }

    void write_u32(std::uint32_t val) const {
        write_raw(&val, sizeof(val));
    }

    ~llama_file() {
        if (fp) {
            std::fclose(fp);
        }
    }
};
struct llama_mmap {
    void * addr;
    size_t size;

    llama_mmap(const llama_mmap &) = delete;

#ifdef _POSIX_MAPPED_FILES
    static constexpr bool SUPPORTED = true;

    // list of mapped fragments (first_offset, last_offset)
    std::vector<std::pair<size_t, size_t>> mapped_fragments;

    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
        size = file->size;
        int fd = fileno(file->fp);
        int flags = MAP_SHARED;
        // prefetch/readahead impairs performance on NUMA systems
        if (numa) { prefetch = 0; }
#ifdef __linux__
        // advise the kernel to read the file sequentially (increases readahead)
        if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
            LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
                    strerror(errno));
        }
        if (prefetch) { flags |= MAP_POPULATE; }
#endif
        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
        if (addr == MAP_FAILED) { // NOLINT
            throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
        }

        if (prefetch > 0) {
            // advise the kernel to preload the mapped memory
            if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
                LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
                        strerror(errno));
            }
        }
        if (numa) {
            // advise the kernel not to use readahead
            // (because the next page might not belong on the same node)
            if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
                LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
                        strerror(errno));
            }
        }

        // initialize list of mapped_fragments
        mapped_fragments.emplace_back(0, file->size);
    }
    static void align_range(size_t * first, size_t * last, size_t page_size) {
        // align first to the next page
        size_t offset_in_page = *first & (page_size - 1);
        size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
        *first += offset_to_page;

        // align last to the previous page
        *last = *last & ~(page_size - 1);

        if (*last <= *first) {
            *last = *first;
        }
    }
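    // worked example with page_size == 4096: first = 100, last = 5000 becomes
    // first = 4096 (rounded up to the next page boundary) and last = 4096
    // (rounded down); since last <= first the range collapses to empty, so
    // partial pages are never unmapped.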
    // partially unmap the file in the range [first, last)
    void unmap_fragment(size_t first, size_t last) {
        // note: this function must not be called multiple times with overlapping ranges
        // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
        int page_size = sysconf(_SC_PAGESIZE);
        align_range(&first, &last, page_size);
        size_t len = last - first;

        if (len == 0) {
            return;
        }

        GGML_ASSERT(first % page_size == 0);
        GGML_ASSERT(last % page_size == 0);
        GGML_ASSERT(last > first);

        void * next_page_start = (uint8_t *) addr + first;

        // unmap the range
        if (munmap(next_page_start, len)) {
            LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
        }

        // update the list of mapped fragments to avoid unmapping the same range again in the destructor
        std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
        for (const auto & frag : mapped_fragments) {
            if (frag.first < first && frag.second > last) {
                // the range is in the middle of the fragment, split it
                new_mapped_fragments.emplace_back(frag.first, first);
                new_mapped_fragments.emplace_back(last, frag.second);
            } else if (frag.first < first && frag.second > first) {
                // the range starts in the middle of the fragment
                new_mapped_fragments.emplace_back(frag.first, first);
            } else if (frag.first < last && frag.second > last) {
                // the range ends in the middle of the fragment
                new_mapped_fragments.emplace_back(last, frag.second);
            } else if (frag.first >= first && frag.second <= last) {
                // the range covers the entire fragment
            } else {
                // the range is outside the fragment
                new_mapped_fragments.push_back(frag);
            }
        }
        mapped_fragments = std::move(new_mapped_fragments);
    }
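    // illustrative example: with a single fragment [0, size) and a call to
    // unmap_fragment(a, b) strictly inside it, the first branch splits the list
    // into [0, a) and [b, size), so the destructor later munmaps only the pieces
    // that are still mapped.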
    ~llama_mmap() {
        for (const auto & frag : mapped_fragments) {
            if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
                LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
            }
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
        GGML_UNUSED(numa);

        size = file->size;

        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));

        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);

        if (hMapping == NULL) {
            DWORD error = GetLastError();
            throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
        }

        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
        DWORD error = GetLastError();
        CloseHandle(hMapping);

        if (addr == NULL) {
            throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
        }

        if (prefetch > 0) {
#if _WIN32_WINNT >= 0x602
            // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
            BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
            HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");

            // may fail on pre-Windows 8 systems
            pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));

            if (pPrefetchVirtualMemory) {
                // advise the kernel to preload the mapped memory
                WIN32_MEMORY_RANGE_ENTRY range;
                range.VirtualAddress = addr;
                range.NumberOfBytes  = (SIZE_T) std::min(size, prefetch);
                if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                    LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
                            llama_format_win_err(GetLastError()).c_str());
                }
            }
#else
            throw std::runtime_error("PrefetchVirtualMemory unavailable");
#endif
        }
    }

    void unmap_fragment(size_t first, size_t last) {
        // not supported
        GGML_UNUSED(first);
        GGML_UNUSED(last);
    }

    ~llama_mmap() {
        if (!UnmapViewOfFile(addr)) {
            LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) {
        GGML_UNUSED(file);
        GGML_UNUSED(prefetch);
        GGML_UNUSED(numa);

        throw std::runtime_error("mmap not supported");
    }

    void unmap_fragment(size_t first, size_t last) {
        GGML_UNUSED(first);
        GGML_UNUSED(last);

        throw std::runtime_error("mmap not supported");
    }
#endif
};
// Represents some region of memory being locked using mlock or VirtualLock;
// will automatically unlock on destruction.
struct llama_mlock {
    void * addr = NULL;
    size_t size = 0;

    bool failed_already = false;

    llama_mlock() {}
    llama_mlock(const llama_mlock &) = delete;

    ~llama_mlock() {
        if (size) {
            raw_unlock(addr, size);
        }
    }

    void init(void * ptr) {
        GGML_ASSERT(addr == NULL && size == 0); // NOLINT
        addr = ptr;
    }

    void grow_to(size_t target_size) {
        GGML_ASSERT(addr);
        if (failed_already) {
            return;
        }
        size_t granularity = lock_granularity();
        target_size = (target_size + granularity - 1) & ~(granularity - 1);
        if (target_size > size) {
            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
                size = target_size;
            } else {
                failed_already = true;
            }
        }
    }
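    // illustrative example: with a 4 KiB granularity, grow_to(5000) rounds the
    // target up to 8192 via (target + granularity - 1) & ~(granularity - 1) and
    // locks only the new [size, 8192) tail, so repeated calls lock each region
    // exactly once and stop trying after the first failure.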
#ifdef _POSIX_MEMLOCK_RANGE
    static constexpr bool SUPPORTED = true;

    static size_t lock_granularity() {
        return (size_t) sysconf(_SC_PAGESIZE);
    }

    #ifdef __APPLE__
        #define MLOCK_SUGGESTION \
            "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
            "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n"
    #else
        #define MLOCK_SUGGESTION \
            "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n"
    #endif

    bool raw_lock(const void * addr, size_t size) const {
        if (!mlock(addr, size)) {
            return true;
        }

        char * errmsg = std::strerror(errno);
        bool suggest = (errno == ENOMEM);

        // Check if the resource limit is fine after all
        struct rlimit lock_limit;
        if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
            suggest = false;
        }
        if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
            suggest = false;
        }

        LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
        return false;
    }

    #undef MLOCK_SUGGESTION

    static void raw_unlock(void * addr, size_t size) {
        if (munlock(addr, size)) {
            LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno));
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    static size_t lock_granularity() {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        return (size_t) si.dwPageSize;
    }

    bool raw_lock(void * ptr, size_t len) const {
        for (int tries = 1; ; tries++) {
            if (VirtualLock(ptr, len)) {
                return true;
            }
            if (tries == 2) {
                LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
                        len, size, llama_format_win_err(GetLastError()).c_str());
                return false;
            }

            // It failed but this was only the first try; increase the working
            // set size and try again.
            SIZE_T min_ws_size, max_ws_size;
            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
                LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // Per MSDN: "The maximum number of pages that a process can lock
            // is equal to the number of pages in its minimum working set minus
            // a small overhead."
            // Hopefully a megabyte is enough overhead:
            size_t increment = len + 1048576;
            // The minimum must be <= the maximum, so we need to increase both:
            min_ws_size += increment;
            max_ws_size += increment;
            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
                LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
        }
    }

    static void raw_unlock(void * ptr, size_t len) {
        if (!VirtualUnlock(ptr, len)) {
            LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    static size_t lock_granularity() {
        return (size_t) 65536;
    }

    bool raw_lock(const void * addr, size_t len) const {
        LLAMA_LOG_WARN("warning: mlock not supported on this system\n");
        return false;
    }

    static void raw_unlock(const void * addr, size_t len) {}
#endif
};
static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
    std::vector<char> result(8, 0);
    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }

    return std::string(result.data(), result.size());
}
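// note: the C API signals an undersized buffer by returning the negative of the
// required length, so the wrapper above retries exactly once with a buffer of
// that size and asserts that the second call agrees.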
static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer) {
    ggml_backend_buffer_type_t buft = nullptr;

#if defined(GGML_USE_CUBLAS)
    // host buffers should only be used when data is expected to be copied to/from the GPU
    if (host_buffer) {
        buft = ggml_backend_cuda_host_buffer_type();
    }
#elif defined(GGML_USE_SYCL)
    buft = ggml_backend_sycl_host_buffer_type();
#elif defined(GGML_USE_CPU_HBM)
    buft = ggml_backend_cpu_hbm_buffer_type();
#elif defined(GGML_USE_VULKAN)
    if (host_buffer) {
        buft = ggml_backend_vk_host_buffer_type();
    }
#endif

    if (buft == nullptr) {
        buft = ggml_backend_cpu_buffer_type();
    }
    return buft;

    GGML_UNUSED(host_buffer);
}

static ggml_backend_buffer_type_t llama_default_buffer_type_offload(int gpu) {
    ggml_backend_buffer_type_t buft = nullptr;

#ifdef GGML_USE_METAL
    buft = ggml_backend_metal_buffer_type();
#elif defined(GGML_USE_CUBLAS)
    buft = ggml_backend_cuda_buffer_type(gpu);
#elif defined(GGML_USE_VULKAN)
    buft = ggml_backend_vk_buffer_type(gpu);
#elif defined(GGML_USE_SYCL)
    buft = ggml_backend_sycl_buffer_type(gpu);
#elif defined(GGML_USE_CLBLAST)
    buft = ggml_backend_opencl_buffer_type();
#elif defined(GGML_USE_KOMPUTE)
    buft = ggml_backend_kompute_buffer_type(gpu);
    if (buft == nullptr) {
        LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, gpu);
    }
#endif

    if (buft == nullptr) {
        buft = llama_default_buffer_type_cpu(true);
    }
    return buft;

    GGML_UNUSED(gpu);
}

static ggml_backend_buffer_type_t llama_default_buffer_type_split(int fallback_gpu, const float * tensor_split) {
    ggml_backend_buffer_type_t buft = nullptr;

#ifdef GGML_USE_CUBLAS
    if (ggml_backend_cuda_get_device_count() > 1) {
        buft = ggml_backend_cuda_split_buffer_type(tensor_split);
    }
#endif

    if (buft == nullptr) {
        buft = llama_default_buffer_type_offload(fallback_gpu);
    }
    return buft;

    GGML_UNUSED(tensor_split);
}

static size_t llama_get_device_count() {
#if defined(GGML_USE_CUBLAS)
    return ggml_backend_cuda_get_device_count();
#elif defined(GGML_USE_VULKAN)
    return ggml_backend_vk_get_device_count();
#else
    return 1;
#endif
}

static size_t llama_get_device_memory(int device) {
#if defined(GGML_USE_CUBLAS)
    size_t total;
    size_t free;
    ggml_backend_cuda_get_device_memory(device, &total, &free);
    return free;
#elif defined(GGML_USE_VULKAN)
    size_t total;
    size_t free;
    ggml_backend_vk_get_device_memory(device, &total, &free);
    return free;
#else
    return 1;
    GGML_UNUSED(device);
#endif
}
//
// globals
//

struct llama_state {
    llama_state() {
#ifdef GGML_USE_METAL
        ggml_backend_metal_log_set_callback(log_callback, log_callback_user_data);
#endif
    }

    // We save the log callback globally
    ggml_log_callback log_callback = llama_log_callback_default;
    void * log_callback_user_data = nullptr;
};

static llama_state g_state;

// available llama models
enum e_model {
    MODEL_UNKNOWN,
    MODEL_17M,
    MODEL_22M,
    MODEL_33M,
    MODEL_109M,
    MODEL_137M,
    MODEL_335M,
    MODEL_0_5B,
    MODEL_1B,
    MODEL_2B,
    MODEL_3B,
    MODEL_4B,
    MODEL_7B,
    MODEL_8B,
    MODEL_13B,
    MODEL_14B,
    MODEL_15B,
    MODEL_20B,
    MODEL_30B,
    MODEL_34B,
    MODEL_40B,
    MODEL_65B,
    MODEL_70B,
    MODEL_SMALL,
    MODEL_MEDIUM,
    MODEL_LARGE,
    MODEL_XL,
};

static const size_t kiB = 1024;
static const size_t MiB = 1024*kiB;
static const size_t GiB = 1024*MiB;
struct llama_hparams {
    bool vocab_only;
    bool rope_finetuned;
    uint32_t n_vocab;
    uint32_t n_ctx_train; // context size the model was trained on
    uint32_t n_embd;
    uint32_t n_head;
    uint32_t n_head_kv;
    uint32_t n_layer;
    uint32_t n_rot;
    uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
    uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
    uint32_t n_ff;
    uint32_t n_expert = 0;
    uint32_t n_expert_used = 0;
    uint32_t n_vocab_type = 0; // for BERT-style token types

    float f_norm_eps;
    float f_norm_rms_eps;

    float    rope_freq_base_train;
    float    rope_freq_scale_train;
    uint32_t n_yarn_orig_ctx;
    int32_t  rope_scaling_type_train;

    float f_clamp_kqv = 0.0f;
    float f_max_alibi_bias = 0.0f;

    bool causal_attn = true;
    bool need_kq_pos = false;

    uint32_t pooling_type = LLAMA_POOLING_NONE;

    bool operator!=(const llama_hparams & other) const {
        if (this->vocab_only    != other.vocab_only)    return true;
        if (this->n_vocab       != other.n_vocab)       return true;
        if (this->n_ctx_train   != other.n_ctx_train)   return true;
        if (this->n_embd        != other.n_embd)        return true;
        if (this->n_head        != other.n_head)        return true;
        if (this->n_head_kv     != other.n_head_kv)     return true;
        if (this->n_layer       != other.n_layer)       return true;
        if (this->n_rot         != other.n_rot)         return true;
        if (this->n_embd_head_k != other.n_embd_head_k) return true;
        if (this->n_embd_head_v != other.n_embd_head_v) return true;
        if (this->n_ff          != other.n_ff)          return true;
        if (this->n_expert      != other.n_expert)      return true;
        if (this->n_expert_used != other.n_expert_used) return true;

        if (this->rope_finetuned  != other.rope_finetuned)  return true;
        if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;

        const float EPSILON = 1e-9f;

        if (!is_float_close(this->f_norm_eps,            other.f_norm_eps,            EPSILON)) return true;
        if (!is_float_close(this->f_norm_rms_eps,        other.f_norm_rms_eps,        EPSILON)) return true;
        if (!is_float_close(this->rope_freq_base_train,  other.rope_freq_base_train,  EPSILON)) return true;
        if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;

        return false;
    }

    uint32_t n_gqa() const {
        return n_head/n_head_kv;
    }

    uint32_t n_embd_k_gqa() const { // dimension of key embeddings across all k-v heads
        return n_embd_head_k * n_head_kv;
    }

    uint32_t n_embd_v_gqa() const { // dimension of value embeddings across all k-v heads
        return n_embd_head_v * n_head_kv;
    }
};
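// illustrative example (hypothetical numbers, not tied to any specific model):
// with n_head = 32 and n_head_kv = 8 (grouped-query attention), n_gqa() == 4,
// i.e. four query heads share each k-v head; with n_embd_head_k = 128 the KV
// cache then stores n_embd_k_gqa() == 1024 key values per token per layer.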
struct llama_cparams {
    uint32_t n_ctx;           // context size used during inference
    uint32_t n_batch;
    uint32_t n_threads;       // number of threads to use for generation
    uint32_t n_threads_batch; // number of threads to use for batch processing

    float    rope_freq_base;
    float    rope_freq_scale;

    uint32_t n_yarn_orig_ctx;
    // These hyperparameters are not exposed in GGUF, because all
    // existing YaRN models use the same values for them.
    float yarn_ext_factor;
    float yarn_attn_factor;
    float yarn_beta_fast;
    float yarn_beta_slow;

    bool mul_mat_q;
    bool offload_kqv;
    bool do_pooling;

    ggml_backend_sched_eval_callback cb_eval;
    void * cb_eval_user_data;
};
struct llama_layer {
    // normalization
    struct ggml_tensor * attn_norm;
    struct ggml_tensor * attn_norm_b;
    struct ggml_tensor * attn_norm_2;
    struct ggml_tensor * attn_norm_2_b;
    struct ggml_tensor * attn_q_norm;
    struct ggml_tensor * attn_q_norm_b;
    struct ggml_tensor * attn_k_norm;
    struct ggml_tensor * attn_k_norm_b;
    struct ggml_tensor * attn_out_norm;
    struct ggml_tensor * attn_out_norm_b;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;
    struct ggml_tensor * wqkv;

    // attention bias
    struct ggml_tensor * bq;
    struct ggml_tensor * bk;
    struct ggml_tensor * bv;
    struct ggml_tensor * bo;
    struct ggml_tensor * bqkv;

    // normalization
    struct ggml_tensor * ffn_norm;
    struct ggml_tensor * ffn_norm_b;
    struct ggml_tensor * layer_out_norm;
    struct ggml_tensor * layer_out_norm_b;

    // ff
    struct ggml_tensor * ffn_gate; // w1
    struct ggml_tensor * ffn_down; // w2
    struct ggml_tensor * ffn_up;   // w3

    // ff MoE
    struct ggml_tensor * ffn_gate_inp;
    struct ggml_tensor * ffn_gate_exp[LLAMA_MAX_EXPERTS];
    struct ggml_tensor * ffn_down_exp[LLAMA_MAX_EXPERTS];
    struct ggml_tensor * ffn_up_exp [LLAMA_MAX_EXPERTS];

    // ff bias
    struct ggml_tensor * ffn_down_b; // b2
    struct ggml_tensor * ffn_up_b;   // b3
    struct ggml_tensor * ffn_act;
};
struct llama_kv_cell {
    llama_pos pos   = -1;
    llama_pos delta = 0;

    std::set<llama_seq_id> seq_id;

    bool has_seq_id(const llama_seq_id & id) const {
        return seq_id.find(id) != seq_id.end();
    }
};

// ring-buffer of cached KV data
struct llama_kv_cache {
    bool has_shift = false;

    // Note: The value of head isn't only used to optimize searching
    // for a free KV slot. llama_decode_internal also uses it, so it
    // cannot be freely changed after a slot has been allocated.
    uint32_t head = 0;
    uint32_t size = 0;
    uint32_t used = 0; // used cells (i.e. at least one seq_id)

    // computed before each graph build
    uint32_t n = 0;

    std::vector<llama_kv_cell> cells;

    std::vector<struct ggml_tensor *> k_l; // per layer
    std::vector<struct ggml_tensor *> v_l;

    std::vector<struct ggml_context *> ctxs;
    std::vector<ggml_backend_buffer_t> bufs;

    size_t total_size() const {
        size_t size = 0;
        for (ggml_backend_buffer_t buf : bufs) {
            size += ggml_backend_buffer_get_size(buf);
        }
        return size;
    }

    ~llama_kv_cache() {
        for (struct ggml_context * ctx : ctxs) {
            ggml_free(ctx);
        }
        for (ggml_backend_buffer_t buf : bufs) {
            ggml_backend_buffer_free(buf);
        }
    }
};
struct llama_vocab {
    using id    = int32_t;
    using token = std::string;
    using ttype = llama_token_type;

    struct token_data {
        token text;
        float score;
        ttype type;
    };

    enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;

    std::unordered_map<token, id> token_to_id;
    std::vector<token_data>       id_to_token;

    std::unordered_map<token, id> special_tokens_cache;

    std::map<std::pair<std::string, std::string>, int> bpe_ranks;

    // default LLaMA special tokens
    id special_bos_id = 1;
    id special_eos_id = 2;
    id special_unk_id = 0;
    id special_sep_id = -1;
    id special_pad_id = -1;

    int special_add_bos = -1; // -1 unknown, 1 add, 0 don't add.
    int special_add_eos = -1; // -1 unknown, 1 add, 0 don't add.

    id linefeed_id       = 13;
    id special_prefix_id = 32007;
    id special_middle_id = 32009;
    id special_suffix_id = 32008;
    id special_eot_id    = 32010;

    bool add_space_prefix = true;

    int find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
        GGML_ASSERT(token_left.find(' ')  == std::string::npos);
        GGML_ASSERT(token_left.find('\n') == std::string::npos);
        GGML_ASSERT(token_right.find(' ') == std::string::npos);
        GGML_ASSERT(token_right.find('\n') == std::string::npos);

        auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
        if (it == bpe_ranks.end()) {
            return -1;
        }

        return it->second;
    }
};
struct llama_model {
    e_model     type  = MODEL_UNKNOWN;
    llm_arch    arch  = LLM_ARCH_UNKNOWN;
    llama_ftype ftype = LLAMA_FTYPE_ALL_F32;

    std::string name = "n/a";

    llama_hparams hparams = {};
    llama_vocab   vocab;

    struct ggml_tensor * tok_embd;
    struct ggml_tensor * type_embd;
    struct ggml_tensor * pos_embd;
    struct ggml_tensor * tok_norm;
    struct ggml_tensor * tok_norm_b;

    struct ggml_tensor * output_norm;
    struct ggml_tensor * output_norm_b;
    struct ggml_tensor * output;
    struct ggml_tensor * output_b;

    std::vector<llama_layer> layers;

    llama_split_mode split_mode;
    int main_gpu;
    int n_gpu_layers;

    // gguf metadata
    std::unordered_map<std::string, std::string> gguf_kv;

    // layer -> buffer type mapping
    struct layer_buft {
        layer_buft() : buft_matrix(nullptr), buft(nullptr) {}
        layer_buft(ggml_backend_buffer_type_t matrix) : buft_matrix(matrix), buft(matrix) {}
        layer_buft(ggml_backend_buffer_type_t matrix, ggml_backend_buffer_type_t other) : buft_matrix(matrix), buft(other) {}

        ggml_backend_buffer_type_t buft_matrix; // matrices only - used by split buffers and backends that support only matrix multiplication
        ggml_backend_buffer_type_t buft;        // everything else
    };

    layer_buft buft_input;
    layer_buft buft_output;
    std::vector<layer_buft> buft_layer;

    // contexts where the model tensors metadata is stored
    std::vector<struct ggml_context *> ctxs;

    // the model memory buffers for the tensor data
    std::vector<ggml_backend_buffer_t> bufs;

    // model memory mapped file
    std::unique_ptr<llama_mmap> mapping;

    // objects representing data potentially being locked in memory
    std::vector<std::unique_ptr<llama_mlock>> mlock_bufs;
    llama_mlock mlock_mmap;

    // for quantize-stats only
    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;

    int64_t t_load_us = 0;
    int64_t t_start_us = 0;

    ~llama_model() {
        for (struct ggml_context * ctx : ctxs) {
            ggml_free(ctx);
        }
        for (ggml_backend_buffer_t buf : bufs) {
            ggml_backend_buffer_free(buf);
        }
    }
};
  1643. struct llama_context {
  1644. llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
  1645. ~llama_context() {
  1646. ggml_backend_sched_free(sched);
  1647. for (ggml_backend_t backend : backends) {
  1648. ggml_backend_free(backend);
  1649. }
  1650. #ifdef GGML_USE_VULKAN
  1651. ggml_vk_free_cpu_assist();
  1652. #endif
  1653. ggml_backend_buffer_free(buf_input);
  1654. ggml_free(ctx_input);
  1655. }
  1656. llama_cparams cparams;
  1657. std::vector<ggml_backend_t> backends;
  1658. #ifdef GGML_USE_METAL
  1659. ggml_backend_t backend_metal = nullptr;
  1660. #endif
  1661. ggml_backend_t backend_cpu = nullptr;
  1662. const llama_model & model;
  1663. // key + value cache for the self attention
  1664. struct llama_kv_cache kv_self;
  1665. std::mt19937 rng;
  1666. bool has_evaluated_once = false;
  1667. int64_t t_start_us;
  1668. int64_t t_load_us;
  1669. int64_t t_sample_us = 0;
  1670. int64_t t_p_eval_us = 0;
  1671. int64_t t_eval_us = 0;
  1672. int32_t n_sample = 0; // number of tokens sampled
  1673. int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
  1674. int32_t n_eval = 0; // number of eval calls
  1675. // decode output (2-dimensional array: [n_tokens][n_vocab])
  1676. std::vector<float> logits;
  1677. #ifndef NDEBUG
  1678. // guard against access to unset logits
  1679. std::vector<bool> logits_valid;
  1680. #endif
  1681. bool logits_all = false;
  1682. // input embedding (1-dimensional array: [n_embd])
  1683. std::vector<float> embedding;
  1684. // memory buffers used to evaluate the model
  1685. std::vector<uint8_t> buf_compute_meta;
  1686. ggml_backend_sched_t sched = nullptr;
  1687. // input tensors
  1688. ggml_backend_buffer_t buf_input = nullptr;
  1689. ggml_context * ctx_input = nullptr;
  1690. struct ggml_tensor * inp_tokens; // I32 [n_batch]
  1691. struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
  1692. struct ggml_tensor * inp_pos; // I32 [n_batch]
  1693. struct ggml_tensor * inp_KQ_mask; // F32 [n_ctx, n_batch]
  1694. struct ggml_tensor * inp_KQ_pos; // F32 [n_ctx]
  1695. struct ggml_tensor * inp_K_shift; // I32 [n_ctx]
  1696. struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
  1697. struct ggml_tensor * inp_cls; // I32 [n_batch]
  1698. #ifdef GGML_USE_MPI
  1699. ggml_mpi_context * ctx_mpi = NULL;
  1700. #endif
  1701. };
//
// kv cache helpers
//

static bool llama_kv_cache_init(
        struct llama_kv_cache & cache,
            const llama_model & model,
                      ggml_type ktype,
                      ggml_type vtype,
                       uint32_t n_ctx,
                           bool offload) {
    const struct llama_hparams & hparams = model.hparams;

    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
    const int64_t  n_layer      = hparams.n_layer;

    cache.has_shift = false;

    cache.head = 0;
    cache.size = n_ctx;
    cache.used = 0;

    cache.cells.clear();
    cache.cells.resize(n_ctx);

#ifdef GGML_USE_CLBLAST
    offload = false;
#endif

    // count used buffer types
    std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
    if (offload) {
        for (int64_t i = 0; i < n_layer; ++i) {
            buft_layer_count[model.buft_layer[i].buft]++;
        }
    } else {
        buft_layer_count[llama_default_buffer_type_cpu(true)] = n_layer;
    }

    // create a context for each buffer type
    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
    for (auto & it : buft_layer_count) {
        int n_layers = it.second;
        struct ggml_init_params params = {
            /*.mem_size   =*/ 2u*n_layers*ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ggml_context * ctx = ggml_init(params);
        if (!ctx) {
            LLAMA_LOG_ERROR("%s: failed to allocate context for kv cache\n", __func__);
            return false;
        }
        ctx_map[it.first] = ctx;
        cache.ctxs.push_back(ctx);
    }

    cache.k_l.reserve(n_layer);
    cache.v_l.reserve(n_layer);

    for (int i = 0; i < (int) n_layer; i++) {
        struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
        ggml_tensor * k = ggml_new_tensor_1d(ctx, ktype, n_embd_k_gqa*n_ctx);
        ggml_tensor * v = ggml_new_tensor_1d(ctx, vtype, n_embd_v_gqa*n_ctx);
        ggml_format_name(k, "cache_k_l%d", i);
        ggml_format_name(v, "cache_v_l%d", i);
        cache.k_l.push_back(k);
        cache.v_l.push_back(v);
    }

    // allocate tensors and initialize the buffers to avoid NaNs in the padding
    for (auto it : ctx_map) {
        ggml_backend_buffer_type_t buft = it.first;
        ggml_context * ctx = it.second;
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
        if (!buf) {
            LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__);
            return false;
        }
        ggml_backend_buffer_clear(buf, 0);
        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
        cache.bufs.push_back(buf);
    }

    return true;
}
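
// Rough sizing (illustrative): each layer stores n_embd_k_gqa*n_ctx K elements
// and n_embd_v_gqa*n_ctx V elements, so with F16 cells the total is roughly
// n_layer * n_ctx * (n_embd_k_gqa + n_embd_v_gqa) * 2 bytes. For a
// hypothetical model with n_layer = 32, n_embd_k_gqa = n_embd_v_gqa = 4096
// and n_ctx = 4096:
//
//     32 * 4096 * (4096 + 4096) * 2 bytes = 2 GiB of KV cache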
// find an empty slot of size "n_tokens" in the cache
// updates the cache head
// Note: On success, it's important that cache.head points
// to the first cell of the slot.
static bool llama_kv_cache_find_slot(
           struct llama_kv_cache & cache,
        const struct llama_batch & batch) {
    const uint32_t n_ctx    = cache.size;
    const uint32_t n_tokens = batch.n_tokens;

    if (n_tokens > n_ctx) {
        LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
        return false;
    }

    uint32_t n_tested = 0;

    while (true) {
        if (cache.head + n_tokens > n_ctx) {
            n_tested += n_ctx - cache.head;
            cache.head = 0;
            continue;
        }

        bool found = true;
        for (uint32_t i = 0; i < n_tokens; i++) {
            if (cache.cells[cache.head + i].pos >= 0) {
                found = false;
                cache.head += i + 1;
                n_tested   += i + 1;
                break;
            }
        }

        if (found) {
            break;
        }

        if (n_tested >= n_ctx) {
            //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
            return false;
        }
    }

    for (uint32_t i = 0; i < n_tokens; i++) {
        cache.cells[cache.head + i].pos = batch.pos[i];

        for (int32_t j = 0; j < batch.n_seq_id[i]; j++) {
            cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i][j]);
        }
    }

    cache.used += n_tokens;

    return true;
}
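
// Walk-through (illustrative): with cache.size = 8, cache.head = 6 and a
// batch of n_tokens = 3, the first iteration wraps (6 + 3 > 8), so n_tested
// becomes 2 and head resets to 0. If cells 0..2 are then free (pos == -1),
// the slot is found at head = 0; otherwise head advances just past the first
// occupied cell and the scan continues until n_tested reaches n_ctx, at which
// point the search fails and the caller must free space first (e.g. via
// llama_kv_cache_seq_rm).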
// find how many cells are currently in use
static int32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
    for (uint32_t i = cache.size - 1; i > 0; --i) {
        if (cache.cells[i].pos >= 0 && !cache.cells[i].seq_id.empty()) {
            return i + 1;
        }
    }

    return 0;
}

static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
    for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
        cache.cells[i].pos = -1;
        cache.cells[i].seq_id.clear();
    }
    cache.head = 0;
    cache.used = 0;
}

static void llama_kv_cache_seq_rm(
        struct llama_kv_cache & cache,
                 llama_seq_id   seq_id,
                    llama_pos   p0,
                    llama_pos   p1) {
    uint32_t new_head = cache.size;

    if (p0 < 0) p0 = 0;
    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
            if (seq_id < 0) {
                cache.cells[i].seq_id.clear();
            } else if (cache.cells[i].has_seq_id(seq_id)) {
                cache.cells[i].seq_id.erase(seq_id);
            } else {
                continue;
            }
            if (cache.cells[i].seq_id.empty()) {
                // keep count of the number of used cells
                if (cache.cells[i].pos >= 0) cache.used--;

                cache.cells[i].pos = -1;
                if (new_head == cache.size) new_head = i;
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
}

static void llama_kv_cache_seq_cp(
        struct llama_kv_cache & cache,
                 llama_seq_id   seq_id_src,
                 llama_seq_id   seq_id_dst,
                    llama_pos   p0,
                    llama_pos   p1) {
    if (p0 < 0) p0 = 0;
    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

    cache.head = 0;

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
            cache.cells[i].seq_id.insert(seq_id_dst);
        }
    }
}

static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
    uint32_t new_head = cache.size;

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (!cache.cells[i].has_seq_id(seq_id)) {
            if (cache.cells[i].pos >= 0) cache.used--;
            cache.cells[i].pos = -1;
            cache.cells[i].seq_id.clear();
            if (new_head == cache.size) new_head = i;
        } else {
            cache.cells[i].seq_id.clear();
            cache.cells[i].seq_id.insert(seq_id);
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
}

static void llama_kv_cache_seq_shift(
        struct llama_kv_cache & cache,
                 llama_seq_id   seq_id,
                    llama_pos   p0,
                    llama_pos   p1,
                    llama_pos   delta) {
    uint32_t new_head = cache.size;

    if (p0 < 0) p0 = 0;
    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
            cache.has_shift = true;

            cache.cells[i].pos   += delta;
            cache.cells[i].delta += delta;

            if (cache.cells[i].pos < 0) {
                if (!cache.cells[i].seq_id.empty()) cache.used--;
                cache.cells[i].pos = -1;
                cache.cells[i].seq_id.clear();
                if (new_head == cache.size) new_head = i;
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    // Otherwise we just start the next search from the beginning.
    cache.head = new_head != cache.size ? new_head : 0;
}
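
// Typical use (illustrative sketch; n_keep, n_discard and n_past are
// hypothetical caller-side variables): "context shifting" drops the oldest
// n_discard tokens of sequence 0 and slides the remainder back so generation
// can continue past the context limit:
//
//     llama_kv_cache_seq_rm   (cache, 0, n_keep,             n_keep + n_discard);
//     llama_kv_cache_seq_shift(cache, 0, n_keep + n_discard, n_past, -n_discard);
//     n_past -= n_discard;
//
// The per-cell delta recorded here is later consumed by the K-shift graph
// once cache.has_shift is set.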
static void llama_kv_cache_seq_div(
        struct llama_kv_cache & cache,
                 llama_seq_id   seq_id,
                    llama_pos   p0,
                    llama_pos   p1,
                          int   d) {
    if (p0 < 0) p0 = 0;
    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
            cache.has_shift = true;

            {
                llama_pos p_old = cache.cells[i].pos;
                cache.cells[i].pos   /= d;
                cache.cells[i].delta += cache.cells[i].pos - p_old;
            }
        }
    }
}

//
// model loading and saving
//

enum llama_fver {
    GGUF_FILE_VERSION_V1 = 1,
    GGUF_FILE_VERSION_V2 = 2,
    GGUF_FILE_VERSION_V3 = 3,
};

static const char * llama_file_version_name(llama_fver version) {
    switch (version) {
        case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
        case GGUF_FILE_VERSION_V2: return "GGUF V2";
        case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
    }

    return "unknown";
}

static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
    for (size_t i = 1; i < ne.size(); i++) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
    }
    return buf;
}

static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
    for (int i = 1; i < GGML_MAX_DIMS; i++) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
    }
    return buf;
}
namespace GGUFMeta {
    template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
    struct GKV_Base_Type {
        static constexpr gguf_type gt = gt_;

        static T getter(const gguf_context * ctx, const int kid) {
            return gfun(ctx, kid);
        }
    };

    template<typename T> struct GKV_Base;

    template<> struct GKV_Base<bool        >: GKV_Base_Type<bool,         GGUF_TYPE_BOOL,    gguf_get_val_bool> {};
    template<> struct GKV_Base<uint8_t     >: GKV_Base_Type<uint8_t,      GGUF_TYPE_UINT8,   gguf_get_val_u8  > {};
    template<> struct GKV_Base<uint16_t    >: GKV_Base_Type<uint16_t,     GGUF_TYPE_UINT16,  gguf_get_val_u16 > {};
    template<> struct GKV_Base<uint32_t    >: GKV_Base_Type<uint32_t,     GGUF_TYPE_UINT32,  gguf_get_val_u32 > {};
    template<> struct GKV_Base<uint64_t    >: GKV_Base_Type<uint64_t,     GGUF_TYPE_UINT64,  gguf_get_val_u64 > {};
    template<> struct GKV_Base<int8_t      >: GKV_Base_Type<int8_t,       GGUF_TYPE_INT8,    gguf_get_val_i8  > {};
    template<> struct GKV_Base<int16_t     >: GKV_Base_Type<int16_t,      GGUF_TYPE_INT16,   gguf_get_val_i16 > {};
    template<> struct GKV_Base<int32_t     >: GKV_Base_Type<int32_t,      GGUF_TYPE_INT32,   gguf_get_val_i32 > {};
    template<> struct GKV_Base<int64_t     >: GKV_Base_Type<int64_t,      GGUF_TYPE_INT64,   gguf_get_val_i64 > {};
    template<> struct GKV_Base<float       >: GKV_Base_Type<float,        GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
    template<> struct GKV_Base<double      >: GKV_Base_Type<double,       GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
    template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING,  gguf_get_val_str > {};

    template<> struct GKV_Base<std::string> {
        static constexpr gguf_type gt = GGUF_TYPE_STRING;

        static std::string getter(const gguf_context * ctx, const int kid) {
            return gguf_get_val_str(ctx, kid);
        }
    };

    struct ArrayInfo {
        const gguf_type gt;
        const size_t length;
        const void * data;
    };

    template<> struct GKV_Base<ArrayInfo> {
    public:
        static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
        static ArrayInfo getter(const gguf_context * ctx, const int k) {
            return ArrayInfo {
                gguf_get_arr_type(ctx, k),
                size_t(gguf_get_arr_n(ctx, k)),
                gguf_get_arr_data(ctx, k),
            };
        }
    };

    template<typename T>
    class GKV : public GKV_Base<T> {
        GKV() = delete;

    public:
        static T get_kv(const gguf_context * ctx, const int k) {
            const enum gguf_type kt = gguf_get_kv_type(ctx, k);

            if (kt != GKV::gt) {
                throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
                    gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
            }
            return GKV::getter(ctx, k);
        }

        static const char * override_type_to_str(const llama_model_kv_override_type ty) {
            switch (ty) {
                case LLAMA_KV_OVERRIDE_BOOL:  return "bool";
                case LLAMA_KV_OVERRIDE_INT:   return "int";
                case LLAMA_KV_OVERRIDE_FLOAT: return "float";
            }
            return "unknown";
        }

        static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * override) {
            if (!override) { return false; }
            if (override->tag == expected_type) {
                LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
                    __func__, override_type_to_str(override->tag), override->key);
                switch (override->tag) {
                    case LLAMA_KV_OVERRIDE_BOOL:  {
                        LLAMA_LOG_INFO("%s\n", override->bool_value ? "true" : "false");
                    } break;
                    case LLAMA_KV_OVERRIDE_INT:   {
                        LLAMA_LOG_INFO("%" PRId64 "\n", override->int_value);
                    } break;
                    case LLAMA_KV_OVERRIDE_FLOAT: {
                        LLAMA_LOG_INFO("%.6f\n", override->float_value);
                    } break;
                    default:
                        // Shouldn't be possible to end up here, but just in case...
                        throw std::runtime_error(
                            format("Unsupported attempt to override %s type for metadata key %s\n",
                                override_type_to_str(override->tag), override->key));
                }
                return true;
            }
            LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
                __func__, override->key, override_type_to_str(expected_type), override_type_to_str(override->tag));
            return false;
        }

        template<typename OT>
        static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
        try_override(OT & target, const struct llama_model_kv_override * override) {
            if (validate_override(LLAMA_KV_OVERRIDE_BOOL, override)) {
                target = override->bool_value;
                return true;
            }
            return false;
        }

        template<typename OT>
        static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
        try_override(OT & target, const struct llama_model_kv_override * override) {
            if (validate_override(LLAMA_KV_OVERRIDE_INT, override)) {
                target = override->int_value;
                return true;
            }
            return false;
        }
        template<typename OT>
        static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
        try_override(OT & target, const struct llama_model_kv_override * override) {
            if (validate_override(LLAMA_KV_OVERRIDE_FLOAT, override)) {
                target = override->float_value;
                return true;
            }
            return false;
        }

        template<typename OT>
        static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
        try_override(OT & target, const struct llama_model_kv_override * override) {
            (void)target;
            (void)override;
            if (!override) { return false; }
            // Currently, we should never end up here so it would be a bug if we do.
            throw std::runtime_error(format("Unsupported attempt to override string type for metadata key %s\n",
                override ? override->key : "NULL"));
        }
        static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * override = nullptr) {
            if (try_override<T>(target, override)) {
                return true;
            }
            if (k < 0) { return false; }
            target = get_kv(ctx, k);
            return true;
        }

        static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * override = nullptr) {
            return set(ctx, gguf_find_key(ctx, key), target, override);
        }

        static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * override = nullptr) {
            return set(ctx, key.c_str(), target, override);
        }
    };
}
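
// Example (illustrative; ctx_gguf stands for any valid gguf_context): set()
// applies a matching command-line override first, then falls back to the
// value stored in the file, and get_kv() throws if the stored type disagrees
// with the requested one:
//
//     uint32_t n_ctx_train = 0;
//     GGUFMeta::GKV<uint32_t>::set(ctx_gguf, "llama.context_length", n_ctx_train);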
struct llama_model_loader {
    int n_kv      = 0;
    int n_tensors = 0;
    int n_created = 0;

    int64_t n_elements = 0;
    size_t  n_bytes    = 0;

    bool use_mmap = false;

    llama_file  file;
    llama_ftype ftype;
    llama_fver  fver;

    std::unique_ptr<llama_mmap> mapping;
    std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;

    struct gguf_context * ctx_gguf = NULL;
    struct ggml_context * ctx_meta = NULL;

    std::string arch_name;
    LLM_KV      llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);

    llama_model_loader(const std::string & fname, bool use_mmap, const struct llama_model_kv_override * param_overrides_p) : file(fname.c_str(), "rb") {
        int trace = 0;
        if (getenv("LLAMA_TRACE")) {
            trace = atoi(getenv("LLAMA_TRACE"));
        }

        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &ctx_meta,
        };

        if (param_overrides_p != nullptr) {
            for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
                kv_overrides.insert({std::string(p->key), *p});
            }
        }

        ctx_gguf = gguf_init_from_file(fname.c_str(), params);
        if (!ctx_gguf) {
            throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
        }

        get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
        llm_kv = LLM_KV(llm_arch_from_string(arch_name));

        n_kv      = gguf_get_n_kv(ctx_gguf);
        n_tensors = gguf_get_n_tensors(ctx_gguf);

        fver = (enum llama_fver) gguf_get_version(ctx_gguf);

        for (int i = 0; i < n_tensors; i++) {
            const char * name = gguf_get_tensor_name(ctx_gguf, i);
            struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name);
            n_elements += ggml_nelements(t);
            n_bytes    += ggml_nbytes(t);
        }

        LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
                __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));

        // determine file type based on the number of tensors for each quantization and print meta data
        // TODO: make optional
        {
            std::map<enum ggml_type, uint32_t> n_type;

            uint32_t n_type_max = 0;
            enum ggml_type type_max = GGML_TYPE_F32;

            for (int i = 0; i < n_tensors; i++) {
                enum ggml_type type = gguf_get_tensor_type(ctx_gguf, i);

                n_type[type]++;

                if (n_type_max < n_type[type]) {
                    n_type_max = n_type[type];
                    type_max   = type;
                }

                if (trace > 0) {
                    struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i));
                    LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, ggml_get_name(meta), ggml_type_name(type), llama_format_tensor_shape(meta).c_str());
                }
            }

            switch (type_max) {
                case GGML_TYPE_F32:     ftype = LLAMA_FTYPE_ALL_F32;        break;
                case GGML_TYPE_F16:     ftype = LLAMA_FTYPE_MOSTLY_F16;     break;
                case GGML_TYPE_Q4_0:    ftype = LLAMA_FTYPE_MOSTLY_Q4_0;    break;
                case GGML_TYPE_Q4_1:    ftype = LLAMA_FTYPE_MOSTLY_Q4_1;    break;
                case GGML_TYPE_Q5_0:    ftype = LLAMA_FTYPE_MOSTLY_Q5_0;    break;
                case GGML_TYPE_Q5_1:    ftype = LLAMA_FTYPE_MOSTLY_Q5_1;    break;
                case GGML_TYPE_Q8_0:    ftype = LLAMA_FTYPE_MOSTLY_Q8_0;    break;
                case GGML_TYPE_Q2_K:    ftype = LLAMA_FTYPE_MOSTLY_Q2_K;    break;
                case GGML_TYPE_Q3_K:    ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M;  break;
                case GGML_TYPE_Q4_K:    ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;  break;
                case GGML_TYPE_Q5_K:    ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M;  break;
                case GGML_TYPE_Q6_K:    ftype = LLAMA_FTYPE_MOSTLY_Q6_K;    break;
                case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
                case GGML_TYPE_IQ2_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS;  break;
                case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
                case GGML_TYPE_IQ1_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_S;   break;
                default:
                {
                    LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
                    ftype = LLAMA_FTYPE_ALL_F32;
                } break;
            }

            // this is a way to mark that we have "guessed" the file type
            ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);

            {
                const int kid = gguf_find_key(ctx_gguf, "general.file_type");
                if (kid >= 0) {
                    ftype = (llama_ftype) gguf_get_val_u32(ctx_gguf, kid);
                }
            }

            LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);

            for (int i = 0; i < n_kv; i++) {
                const char * name           = gguf_get_key(ctx_gguf, i);
                const enum gguf_type type   = gguf_get_kv_type(ctx_gguf, i);
                const std::string type_name =
                    type == GGUF_TYPE_ARRAY
                    ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx_gguf, i)), gguf_get_arr_n(ctx_gguf, i))
                    : gguf_type_name(type);

                std::string value = gguf_kv_to_str(ctx_gguf, i);
                const size_t MAX_VALUE_LEN = 40;
                if (value.size() > MAX_VALUE_LEN) {
                    value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
                }
                replace_all(value, "\n", "\\n");

                LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
            }

            // print type counts
            for (auto & kv : n_type) {
                if (kv.second == 0) {
                    continue;
                }

                LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
            }
        }

        if (!llama_mmap::SUPPORTED) {
            LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
            use_mmap = false;
        }

        this->use_mmap = use_mmap;
    }
    ~llama_model_loader() {
        if (ctx_gguf) {
            gguf_free(ctx_gguf);
        }
        if (ctx_meta) {
            ggml_free(ctx_meta);
        }
    }

    template<typename T>
    typename std::enable_if<std::is_integral<T>::value, bool>::type
    get_arr_n(const std::string & key, T & result, const bool required = true) {
        const int kid = gguf_find_key(ctx_gguf, key.c_str());

        if (kid < 0) {
            if (required) {
                throw std::runtime_error(format("key not found in model: %s", key.c_str()));
            }
            return false;
        }

        struct GGUFMeta::ArrayInfo arr_info =
            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx_gguf, kid);

        result = arr_info.length;
        return true;
    }

    template<typename T>
    typename std::enable_if<std::is_integral<T>::value, bool>::type
    get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
        return get_arr_n(llm_kv(kid), result, required);
    }

    template<typename T>
    bool get_key(const std::string & key, T & result, const bool required = true) {
        auto it = kv_overrides.find(key);

        const struct llama_model_kv_override * override =
            it != kv_overrides.end() ? &it->second : nullptr;

        const bool found = GGUFMeta::GKV<T>::set(ctx_gguf, key, result, override);

        if (required && !found) {
            throw std::runtime_error(format("key not found in model: %s", key.c_str()));
        }

        return found;
    }

    template<typename T>
    bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
        return get_key(llm_kv(kid), result, required);
    }

    std::string get_arch_name() const {
        return arch_name;
    }

    enum llm_arch get_arch() const {
        return llm_kv.arch;
    }

    const char * get_tensor_name(int i) const {
        return gguf_get_tensor_name(ctx_gguf, i);
    }

    struct ggml_tensor * get_tensor_meta(const char * name) const {
        return ggml_get_tensor(ctx_meta, name);
    }

    struct ggml_tensor * get_tensor_meta(int i) const {
        return get_tensor_meta(get_tensor_name(i));
    }

    struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta) {
        struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta);
        ggml_set_name(tensor, ggml_get_name(meta));

        n_created++;

        return tensor;
    }

    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, bool required = true) {
        struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());

        if (cur == NULL) {
            if (!required) {
                return NULL;
            }
            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
        }

        {
            bool is_ok = true;
            for (size_t i = 0; i < ne.size(); ++i) {
                if (ne[i] != cur->ne[i]) {
                    is_ok = false;
                    break;
                }
            }
            if (!is_ok) {
                throw std::runtime_error(
                        format("%s: tensor '%s' has wrong shape; expected %s, got %s",
                            __func__, name.c_str(),
                            llama_format_tensor_shape(ne).c_str(),
                            llama_format_tensor_shape(cur).c_str()));
            }
        }

        return create_tensor_for(ctx, cur);
    }

    void done_getting_tensors() const {
        if (n_created != n_tensors) {
            throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
        }
    }

    size_t file_offset(const char * name) const {
        const int idx = gguf_find_tensor(ctx_gguf, name);

        if (idx < 0) {
            throw std::runtime_error(format("%s: tensor '%s' not found in the file", __func__, name));
        }

        return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx);
    }

    void init_mapping(bool prefetch = true, llama_mlock * lmlock = nullptr) {
        // prefetch the whole file - all the data is needed anyway
        if (use_mmap) {
            mapping.reset(new llama_mmap(&file, prefetch ? -1 : 0, ggml_is_numa()));
        }

        // compute the total size of all tensors for progress reporting
        for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
            struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i));
            size_data += ggml_nbytes(cur);
        }

        if (use_mmap && mapping) {
            if (lmlock) {
                lmlock->init(mapping->addr);
            }
            mmap_used_first = mapping->size;
        }
    }

    void get_mapping_range(size_t * first, size_t * last, ggml_context * ctx) const {
        GGML_ASSERT(mapping);

        *first = mapping->size;
        *last  = 0;
        for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) {
            const size_t offs = file_offset(ggml_get_name(tensor));
            *first = std::min(*first, offs);
            *last  = std::max(*last,  offs + ggml_nbytes(tensor));
        }
    }

    // for backwards compatibility, does not support ggml-backend
    void load_data_for(struct ggml_tensor * cur) const {
        const size_t offs = file_offset(ggml_get_name(cur));

        if (use_mmap && mapping) {
            if (cur->data == nullptr) {
                cur->data = (uint8_t *)mapping->addr + offs;
            } else {
                memcpy(cur->data, (uint8_t *)mapping->addr + offs, ggml_nbytes(cur));
            }
        } else {
            GGML_ASSERT(cur->data != nullptr);
            file.seek(offs, SEEK_SET);
            file.read_raw(cur->data, ggml_nbytes(cur));
        }
    }

    size_t size_done = 0;
    size_t size_data = 0;
    size_t mmap_used_first = -1;
    size_t mmap_used_last  = 0;

    // Returns false if cancelled by progress_callback
    bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) {
        GGML_ASSERT(size_data != 0 && "call init_mapping() first");

        std::vector<no_init<uint8_t>> read_buf;

        for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
            struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
            if (!cur) {
                // some tensors may be allocated in a different context
                continue;
            }

            if (progress_callback) {
                if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
                    return false;
                }
            }

            const size_t offs = file_offset(ggml_get_name(cur));

            if (use_mmap && mapping) {
                if (buf_mmap && cur->data == nullptr) {
                    ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs);
                    if (lmlock) {
                        lmlock->grow_to(offs + ggml_nbytes(cur));
                    }
                    mmap_used_first = std::min(mmap_used_first, offs);
                    mmap_used_last  = std::max(mmap_used_last,  offs + ggml_nbytes(cur));
                } else {
                    ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur));
                }
            } else {
                if (ggml_backend_buffer_is_host(cur->buffer)) {
                    file.seek(offs, SEEK_SET);
                    file.read_raw(cur->data, ggml_nbytes(cur));
                } else {
                    read_buf.resize(ggml_nbytes(cur));
                    file.seek(offs, SEEK_SET);
                    file.read_raw(read_buf.data(), ggml_nbytes(cur));
                    ggml_backend_tensor_set(cur, read_buf.data(), 0, ggml_nbytes(cur));
                }
            }

            size_done += ggml_nbytes(cur);
        }

        // check if this is the last call and do final cleanup
        if (size_done >= size_data) {
            // unmap offloaded tensors and metadata
            if (use_mmap && mapping) {
                mapping->unmap_fragment(0, mmap_used_first);
                if (mmap_used_last != 0) {
                    mapping->unmap_fragment(mmap_used_last, mapping->size);
                }
            }
            if (progress_callback) {
                // Even though the model is done loading, we still honor
                // cancellation since we need to free allocations.
                return progress_callback(1.0f, progress_callback_user_data);
            }
        }

        return true;
    }
};
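
// Typical lifecycle (illustrative sketch; fname, ctx, buf_mmap, lmlock and
// the progress callback are hypothetical caller-side values): construct the
// loader, query metadata, create every expected tensor, then stream the data:
//
//     llama_model_loader ml(fname, /*use_mmap =*/ true, /*param_overrides_p =*/ nullptr);
//     uint32_t n_embd = 0;
//     ml.get_key(LLM_KV_EMBEDDING_LENGTH, n_embd);
//     // ... ml.create_tensor(ctx, ...) for each expected tensor ...
//     ml.done_getting_tensors(); // throws unless all file tensors were created
//     ml.init_mapping();
//     ml.load_all_data(ctx, progress_cb, progress_cb_user_data, buf_mmap, lmlock);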
//
// load LLaMA models
//

static const char * llama_model_arch_name(llm_arch arch) {
    auto it = LLM_ARCH_NAMES.find(arch);
    if (it == LLM_ARCH_NAMES.end()) {
        return "unknown";
    }
    return it->second;
}

static std::string llama_model_ftype_name(llama_ftype ftype) {
    if (ftype & LLAMA_FTYPE_GUESSED) {
        return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
    }

    switch (ftype) {
        case LLAMA_FTYPE_ALL_F32:     return "all F32";
        case LLAMA_FTYPE_MOSTLY_F16:  return "F16";
        case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
        case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
                                      return "Q4_1, some F16";
        case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
        case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
        case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";

        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K:    return "Q2_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q2_K_S:  return "Q2_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_S:  return "Q3_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_M:  return "Q3_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q3_K_L:  return "Q3_K - Large";
        case LLAMA_FTYPE_MOSTLY_Q4_K_S:  return "Q4_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q4_K_M:  return "Q4_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q5_K_S:  return "Q5_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  return "Q5_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q6_K:    return "Q6_K";
        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  return "IQ2_XS - 2.3125 bpw";
        case LLAMA_FTYPE_MOSTLY_Q3_K_XS: return "Q3_K - Extra small";
        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ1_S:   return "IQ1_S - 1.5625 bpw";

        default: return "unknown, may not work";
    }
}

static const char * llama_model_type_name(e_model type) {
    switch (type) {
        case MODEL_22M:    return "22M";
        case MODEL_33M:    return "33M";
        case MODEL_109M:   return "109M";
        case MODEL_137M:   return "137M";
        case MODEL_0_5B:   return "0.5B";
        case MODEL_1B:     return "1B";
        case MODEL_2B:     return "2B";
        case MODEL_3B:     return "3B";
        case MODEL_7B:     return "7B";
        case MODEL_8B:     return "8B";
        case MODEL_13B:    return "13B";
        case MODEL_14B:    return "14B";
        case MODEL_15B:    return "15B";
        case MODEL_20B:    return "20B";
        case MODEL_30B:    return "30B";
        case MODEL_34B:    return "34B";
        case MODEL_40B:    return "40B";
        case MODEL_65B:    return "65B";
        case MODEL_70B:    return "70B";
        case MODEL_SMALL:  return "0.1B";
        case MODEL_MEDIUM: return "0.4B";
        case MODEL_LARGE:  return "0.8B";
        case MODEL_XL:     return "1.5B";
        default:           return "?B";
    }
}

static const char * llama_model_vocab_type_name(enum llama_vocab_type type) {
    switch (type) {
        case LLAMA_VOCAB_TYPE_SPM: return "SPM";
        case LLAMA_VOCAB_TYPE_BPE: return "BPE";
        case LLAMA_VOCAB_TYPE_WPM: return "WPM";
        default:                   return "unknown";
    }
}
static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
    model.arch = ml.get_arch();
    if (model.arch == LLM_ARCH_UNKNOWN) {
        throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
    }
}

static void llm_load_hparams(
        llama_model_loader & ml,
        llama_model & model) {
    auto & hparams = model.hparams;
    const gguf_context * ctx = ml.ctx_gguf;

    // get metadata as string
    for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
        enum gguf_type type = gguf_get_kv_type(ctx, i);
        if (type == GGUF_TYPE_ARRAY) {
            continue;
        }
        const char * name = gguf_get_key(ctx, i);
        const std::string value = gguf_kv_to_str(ctx, i);
        model.gguf_kv.emplace(name, value);
    }

    // get general kv
    ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);

    // get hparams kv
    ml.get_arr_n(LLM_KV_TOKENIZER_LIST,       hparams.n_vocab);
    ml.get_key  (LLM_KV_CONTEXT_LENGTH,       hparams.n_ctx_train);
    ml.get_key  (LLM_KV_EMBEDDING_LENGTH,     hparams.n_embd);
    ml.get_key  (LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff);
    ml.get_key  (LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head);
    ml.get_key  (LLM_KV_BLOCK_COUNT,          hparams.n_layer);
    ml.get_key  (LLM_KV_EXPERT_COUNT,         hparams.n_expert,      false);
    ml.get_key  (LLM_KV_EXPERT_USED_COUNT,    hparams.n_expert_used, false);

    GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
    GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
    if (hparams.n_expert > 0) {
        GGML_ASSERT(hparams.n_expert_used > 0);
    } else {
        GGML_ASSERT(hparams.n_expert_used == 0);
    }

    // n_head_kv is optional, default to n_head
    hparams.n_head_kv = hparams.n_head;
    ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv, false);

    bool rope_finetuned = false;
    ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
    hparams.rope_finetuned = rope_finetuned;

    hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
    ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_yarn_orig_ctx, false);

    // rope_freq_base (optional)
    hparams.rope_freq_base_train = 10000.0f;
    ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);

    std::string rope_scaling("linear");
    ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
    hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
    GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_UNSPECIFIED);

    // rope_freq_scale (inverse of the kv) is optional
    float ropescale = 0.0f;
    if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
        // try the old key name
        ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
    }
    hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;

    // sanity check for n_rot (optional)
    {
        hparams.n_rot = hparams.n_embd / hparams.n_head;

        ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);

        if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
            if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
                throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
            }
        }
        // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
        // gpt-j    n_rot = rotary_dim
    }

    hparams.n_embd_head_k = hparams.n_embd / hparams.n_head;
    ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);

    hparams.n_embd_head_v = hparams.n_embd / hparams.n_head;
    ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);

    // arch-specific KVs
    switch (model.arch) {
        case LLM_ARCH_LLAMA:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 22: model.type = e_model::MODEL_1B; break;
                    case 26: model.type = e_model::MODEL_3B; break;
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = e_model::MODEL_13B; break;
                    case 48: model.type = e_model::MODEL_34B; break;
                    case 60: model.type = e_model::MODEL_30B; break;
                    case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_MINICPM:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 40: model.type = e_model::MODEL_2B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_FALCON:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 60: model.type = e_model::MODEL_40B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_BAICHUAN:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = e_model::MODEL_13B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }

                if (model.type == e_model::MODEL_13B) {
                    // TODO: become GGUF KV parameter
                    hparams.f_max_alibi_bias = 8.0f;
                }
            } break;
        case LLM_ARCH_STARCODER:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 36: model.type = e_model::MODEL_3B; break;
                    case 42: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = e_model::MODEL_15B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_PERSIMMON:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                switch (hparams.n_layer) {
                    case 36: model.type = e_model::MODEL_8B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_REFACT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_1B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }

                // TODO: become GGUF KV parameter
                hparams.f_max_alibi_bias = 8.0f;
            } break;
        case LLM_ARCH_BERT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);

                switch (hparams.n_layer) {
                    case 3:
                        model.type = e_model::MODEL_17M; break; // bge-micro
                    case 6:
                        model.type = e_model::MODEL_22M; break; // MiniLM-L6
                    case 12:
                        switch (hparams.n_embd) {
                            case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small
                            case 768: model.type = e_model::MODEL_109M; break; // bge-base
                        } break;
                    case 24:
                        model.type = e_model::MODEL_335M; break; // bge-large
                }
            } break;
        case LLM_ARCH_NOMIC_BERT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);

                if (hparams.n_layer == 12 && hparams.n_embd == 768) {
                    model.type = e_model::MODEL_137M;
                }
            } break;
        case LLM_ARCH_BLOOM:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 30:
                        switch (hparams.n_embd) {
                            case 2560: model.type = e_model::MODEL_3B; break;
                            case 4096: model.type = e_model::MODEL_7B; break;
                        } break;
                }

                // TODO: become GGUF KV parameter
                hparams.f_max_alibi_bias = 8.0f;
            } break;
        case LLM_ARCH_MPT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,      hparams.f_clamp_kqv, false);
                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);

                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 48: model.type = e_model::MODEL_30B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_STABLELM:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 32: model.type = e_model::MODEL_3B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_QWEN:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = e_model::MODEL_13B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_QWEN2:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break;
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = hparams.n_head == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break;
                    case 80: model.type = e_model::MODEL_70B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_PHI2:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 32: model.type = e_model::MODEL_3B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_PLAMO:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 40: model.type = e_model::MODEL_13B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_GPT2:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                switch (hparams.n_layer) {
                    case 12: model.type = e_model::MODEL_SMALL; break;
                    case 24: model.type = e_model::MODEL_MEDIUM; break;
                    case 36: model.type = e_model::MODEL_LARGE; break;
                    case 48: model.type = e_model::MODEL_XL; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_CODESHELL:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                switch (hparams.n_layer) {
                    case 42: model.type = e_model::MODEL_SMALL; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_ORION:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 40: model.type = e_model::MODEL_14B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_INTERNLM2:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 48: model.type = e_model::MODEL_20B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        default: (void)0;
    }

    model.ftype = ml.ftype;

    if (hparams.f_max_alibi_bias > 0.0f) {
        hparams.need_kq_pos = true;
    }
}
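
// Worked example (illustrative): a model fine-tuned with linear RoPE scaling
// factor 4.0 stores rope.scaling.factor = 4.0, which the code above inverts
// to rope_freq_scale_train = 1.0f / 4.0f = 0.25f; a factor of 0 (or a missing
// key) leaves the scale at the neutral value 1.0f.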
// TODO: This should probably be in llama.h
static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special = false);
static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);

static void llm_load_vocab(
        llama_model_loader & ml,
        llama_model & model) {
    auto & vocab = model.vocab;

    struct gguf_context * ctx = ml.ctx_gguf;

    const auto kv = LLM_KV(model.arch);

    const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
    if (token_idx == -1) {
        throw std::runtime_error("cannot find tokenizer vocab in model file\n");
    }

    const float * scores = nullptr;
    const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
    if (score_idx != -1) {
        scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
    }

    const int * toktypes = nullptr;
    const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
    if (toktype_idx != -1) {
        toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
    }

    // determine vocab type
    {
        std::string tokenizer_name;

        ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_name);

        if (tokenizer_name == "llama") {
            vocab.type = LLAMA_VOCAB_TYPE_SPM;

            // default special tokens
            vocab.special_bos_id = 1;
            vocab.special_eos_id = 2;
            vocab.special_unk_id = 0;
            vocab.special_sep_id = -1;
            vocab.special_pad_id = -1;

            const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
            if (add_space_prefix_keyidx != -1) {
                vocab.add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
            } // The default value of add_space_prefix is true.
        } else if (tokenizer_name == "gpt2") {
            vocab.type = LLAMA_VOCAB_TYPE_BPE;

            // read bpe merges and populate bpe ranks
            const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
            if (merges_keyidx == -1) {
                throw std::runtime_error("cannot find tokenizer merges in model file\n");
            }

            const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);

            for (int i = 0; i < n_merges; i++) {
                const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
                GGML_ASSERT(codepoints_from_utf8(word).size() > 0);

                std::string first;
                std::string second;

                const size_t pos = word.find(' ', 1);

                if (pos != std::string::npos) {
                    first  = word.substr(0, pos);
                    second = word.substr(pos + 1);
                }

                vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
            }

            // default special tokens
            vocab.special_bos_id = 11;
            vocab.special_eos_id = 11;
            vocab.special_unk_id = -1;
            vocab.special_sep_id = -1;
            vocab.special_pad_id = -1;
        } else if (tokenizer_name == "bert") {
            vocab.type = LLAMA_VOCAB_TYPE_WPM;

            // default special tokens
            vocab.special_bos_id = 101;
            vocab.special_eos_id = 102;
            vocab.special_unk_id = 100;
            vocab.special_sep_id = -1;
            vocab.special_pad_id = -1;
            vocab.add_space_prefix = false;
        } else {
            LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
            LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);

            vocab.type = LLAMA_VOCAB_TYPE_SPM;
        }
    }

    const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);

    vocab.id_to_token.resize(n_vocab);

    for (uint32_t i = 0; i < n_vocab; i++) {
        std::string word = gguf_get_arr_str(ctx, token_idx, i);
        GGML_ASSERT(codepoints_from_utf8(word).size() > 0);

        vocab.token_to_id[word] = i;

        auto & token_data = vocab.id_to_token[i];
        token_data.text  = std::move(word);
        token_data.score = scores ? scores[i] : 0.0f;
        token_data.type  = toktypes ? (llama_token_type) toktypes[i] : LLAMA_TOKEN_TYPE_NORMAL;
    }
    GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());

    // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
    if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
        try {
            vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
        } catch (const std::exception & e) {
            LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
            vocab.linefeed_id = vocab.special_pad_id;
        }
    } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
        vocab.linefeed_id = vocab.special_pad_id;
    } else {
        const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
        GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
        vocab.linefeed_id = ids[0];
    }

    // special tokens
    {
        const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
            { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
            { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
            { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
            { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
            { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
        };
        for (const auto & it : special_token_types) {
            const std::string & key = kv(std::get<0>(it));
            int32_t & id = std::get<1>(it);

            uint32_t new_id;
            if (!ml.get_key(std::get<0>(it), new_id, false)) {
                continue;
            }
  2949. if (new_id >= vocab.id_to_token.size()) {
  2950. LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
  2951. __func__, key.c_str(), new_id, id);
  2952. } else {
  2953. id = new_id;
  2954. }
  2955. }
  2956. // Handle add_bos_token and add_eos_token
  2957. {
  2958. bool temp = true;
  2959. if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
  2960. vocab.special_add_bos = int(temp);
  2961. }
  2962. if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
  2963. vocab.special_add_eos = int(temp);
  2964. }
  2965. }
  2966. }
  2967. // build special tokens cache
  2968. {
  2969. // TODO: It is unclear (to me) at this point, whether special tokes are guaranteed to be of a deterministic type,
  2970. // and will always be correctly labeled in 'added_tokens.json' etc.
  2971. // The assumption is, since special tokens aren't meant to be exposed to end user, they are designed
  2972. // to be unmatchable by the tokenizer, therefore tokens from the vocab, which are unmatchable by the tokenizer
  2973. // are special tokens.
  2974. // From testing, this appears to correlate 1:1 with special tokens.
  2975. //
  2976. // Counting special tokens and verifying in only one direction
  2977. // is sufficient to detect difference in those two sets.
  2978. //
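        // e.g. a control token like "<|im_start|>" cannot be split at any valid
        //  UTF-8 boundary into two strings that are both themselves in the vocab,
        //  while an ordinary merged token such as "ing" typically can ("i" + "ng");
        //  the verification loop below relies on exactly this property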
        uint32_t special_tokens_count_by_type = 0;
        uint32_t special_tokens_count_from_verification = 0;

        bool special_tokens_definition_mismatch = false;

        for (const auto & t : vocab.token_to_id) {
            const auto & token = t.first;
            const auto & id    = t.second;

            // Count all non-normal tokens in the vocab while iterating
            if (vocab.id_to_token[id].type != LLAMA_TOKEN_TYPE_NORMAL) {
                special_tokens_count_by_type++;
            }

            // Skip single character tokens
            if (token.length() > 1) {
                bool is_tokenizable = false;

                // Split token string representation in two, in all possible ways
                //  and check if both halves can be matched to a valid token
                for (unsigned i = 1; i < token.length();) {
                    const auto left  = token.substr(0, i);
                    const auto right = token.substr(i);

                    // check if we didn't partition in the middle of a utf sequence
                    auto utf = utf8_len(left.at(left.length() - 1));

                    if (utf == 1) {
                        if (vocab.token_to_id.find(left)  != vocab.token_to_id.end() &&
                            vocab.token_to_id.find(right) != vocab.token_to_id.end() ) {
                            is_tokenizable = true;
                            break;
                        }
                        i++;
                    } else {
                        // skip over the rest of multibyte utf sequence
                        i += utf - 1;
                    }
                }

                if (!is_tokenizable) {
                    // Some tokens are multi-byte UTF-8 sequences that decode to a single character;
                    //  it's faster to re-filter them here, since there are far fewer candidates now

                    // Calculate the total "utf" length of a token string representation
                    size_t utf8_str_len = 0;
                    for (unsigned i = 0; i < token.length();) {
                        utf8_str_len++;
                        i += utf8_len(token.at(i));
                    }

                    // And skip the ones which are one character
                    if (utf8_str_len > 1) {
                        // At this point what we have left are special tokens only
                        vocab.special_tokens_cache[token] = id;

                        // Count manually found special tokens
                        special_tokens_count_from_verification++;

                        // If this manually found special token is not marked as such, flag a mismatch
                        if (vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL) {
                            special_tokens_definition_mismatch = true;
                        }
                    }
                }
            }
        }

        if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
            LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
                __func__,
                special_tokens_count_from_verification, vocab.id_to_token.size(),
                special_tokens_count_by_type, vocab.id_to_token.size()
            );
        } else {
            LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
                __func__,
                special_tokens_count_from_verification, vocab.id_to_token.size()
            );
        }
    }
}
static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
    const auto & hparams = model.hparams;
    const auto & vocab   = model.vocab;

    const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);

    // hparams
    LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
    LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch));
    LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, llama_model_vocab_type_name(vocab.type));
    LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
    LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
    LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
    LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
    LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
    LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
    LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
    LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot);
    LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k);
    LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v);
    LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
    LLAMA_LOG_INFO("%s: n_embd_k_gqa = %u\n", __func__, hparams.n_embd_k_gqa());
    LLAMA_LOG_INFO("%s: n_embd_v_gqa = %u\n", __func__, hparams.n_embd_v_gqa());
    LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
    LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
    LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
    LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
    LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
    LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
    LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
    LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type);
    LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
    LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
    LLAMA_LOG_INFO("%s: n_yarn_orig_ctx = %u\n", __func__, hparams.n_yarn_orig_ctx);
    LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
    LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
    LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
    if (ml.n_elements >= 1e12) {
        LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, ml.n_elements*1e-12);
    } else if (ml.n_elements >= 1e9) {
        LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
    } else if (ml.n_elements >= 1e6) {
        LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, ml.n_elements*1e-6);
    } else {
        LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
    }
    if (ml.n_bytes < GiB) {
        LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
    } else {
        LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
    }

    // general kv
    LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());

    // special tokens
    if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
    if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
    if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
    if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
    if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
    if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
}

// Returns false if cancelled by progress_callback
static bool llm_load_tensors(
        llama_model_loader & ml,
        llama_model & model,
        int n_gpu_layers,
        enum llama_split_mode split_mode,
        int main_gpu,
        const float * tensor_split,
        bool use_mlock,
        llama_progress_callback progress_callback,
        void * progress_callback_user_data) {
    model.t_start_us = ggml_time_us();

    auto & hparams = model.hparams;

    model.split_mode   = split_mode;
    model.main_gpu     = main_gpu;
    model.n_gpu_layers = n_gpu_layers;

    const int64_t n_layer     = hparams.n_layer;
    const int64_t i_gpu_start = std::max((int64_t) hparams.n_layer - n_gpu_layers, (int64_t) 0);

    // there is very little benefit to offloading the input layer, so always keep it on the CPU
    model.buft_input = llama_default_buffer_type_cpu(true);

    model.buft_layer.resize(n_layer);

    // assign cpu layers
    for (int64_t i = 0; i < i_gpu_start; ++i) {
        model.buft_layer[i] = llama_default_buffer_type_cpu(true);
    }
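
    // e.g. with n_layer = 32 and n_gpu_layers = 20: i_gpu_start = 12, so layers
    // 0..11 stay on the CPU and layers 12..31 are offloaded below; requesting
    // n_gpu_layers > n_layer additionally offloads the output layer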
    if (split_mode == LLAMA_SPLIT_LAYER) {
        // calculate the split points
        int device_count = llama_get_device_count();
        bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; });
        std::vector<float> splits(device_count);
        if (all_zero) {
            // default split, by free memory
            for (int i = 0; i < device_count; ++i) {
                splits[i] = llama_get_device_memory(i);
            }
        } else {
            std::copy(tensor_split, tensor_split + device_count, splits.begin());
        }

        // sum and normalize the splits to get the split points
        float split_sum = 0.0f;
        for (int i = 0; i < device_count; ++i) {
            split_sum += splits[i];
            splits[i] = split_sum;
        }
        for (int i = 0; i < device_count; ++i) {
            splits[i] /= split_sum;
        }
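
        // e.g. tensor_split = {3, 1} on two devices gives cumulative sums {3, 4}
        // and normalized split points {0.75, 1.0}: the std::upper_bound lookups
        // below then send the first 75% of the offloaded layers to device 0 and
        // the remainder to device 1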
        // assign the repeating layers to the devices according to the splits
        int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1);
        for (int64_t i = i_gpu_start; i < n_layer; ++i) {
            int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits.begin();
            model.buft_layer[i] = llama_default_buffer_type_offload(layer_gpu);
        }
        // assign the output layer
        if (n_gpu_layers > n_layer) {
            int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits.begin();
            model.buft_output = llama_default_buffer_type_offload(layer_gpu);
        } else {
            model.buft_output = llama_default_buffer_type_cpu(true);
        }
    } else {
        ggml_backend_buffer_type_t split_buft;
        if (split_mode == LLAMA_SPLIT_ROW) {
            split_buft = llama_default_buffer_type_split(main_gpu, tensor_split);
        } else {
            // LLAMA_SPLIT_NONE or LLAMA_SPLIT_LAYER in backends where it is not supported
            split_buft = llama_default_buffer_type_offload(main_gpu);
        }
        // assign the repeating layers
        for (int64_t i = i_gpu_start; i < n_layer; ++i) {
            model.buft_layer[i] = {
                split_buft,
                llama_default_buffer_type_offload(main_gpu)
            };
        }
        // assign the output layer
        if (n_gpu_layers > n_layer) {
            model.buft_output = {
                split_buft,
                llama_default_buffer_type_offload(main_gpu)
            };
        } else {
            model.buft_output = llama_default_buffer_type_cpu(true);
        }
    }

    // count used buffer types
    std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
    buft_layer_count[model.buft_input.buft]++;
    buft_layer_count[model.buft_input.buft_matrix]++;
    buft_layer_count[model.buft_output.buft]++;
    buft_layer_count[model.buft_output.buft_matrix]++;
    for (int64_t i = 0; i < n_layer; ++i) {
        buft_layer_count[model.buft_layer[i].buft]++;
        buft_layer_count[model.buft_layer[i].buft_matrix]++;
    }

    // create one context per buffer type
    size_t ctx_size = ggml_tensor_overhead()*ml.n_tensors;
    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
    for (auto & it : buft_layer_count) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx_size,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ggml_context * ctx = ggml_init(params);
        if (!ctx) {
            throw std::runtime_error(format("failed to create context"));
        }
        ctx_map[it.first] = ctx;
        model.ctxs.push_back(ctx);
    }
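
    // note: with no_alloc = true these contexts only hold tensor metadata
    // (the ggml_tensor structs created via ml.create_tensor below); the actual
    // weight data lives in the backend buffers allocated afterwards - one
    // context per buffer type so each can be bound to its own buffer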
    LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);

    // create tensors for the weights
    {
        const int64_t n_embd       = hparams.n_embd;
        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
        const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
        const int64_t n_embd_gqa   = n_embd_v_gqa;
        const int64_t n_vocab      = hparams.n_vocab;
        const int64_t n_vocab_type = hparams.n_vocab_type;
        const int64_t n_ff         = hparams.n_ff;

        GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);

        ggml_context * ctx_input        = ctx_map.at(model.buft_input.buft);
        ggml_context * ctx_output       = ctx_map.at(model.buft_output.buft);
        ggml_context * ctx_output_split = ctx_map.at(model.buft_output.buft_matrix);
        auto ctx_for_layer       = [&](int i) { return ctx_map.at(model.buft_layer[i].buft); };
        auto ctx_for_layer_split = [&](int i) { return ctx_map.at(model.buft_layer[i].buft_matrix); };

        model.layers.resize(n_layer);

        const auto tn = LLM_TN(model.arch);
        switch (model.arch) {
            case LLM_ARCH_LLAMA:
            case LLM_ARCH_REFACT:
            case LLM_ARCH_MINICPM:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        if (model.arch != LLM_ARCH_MINICPM) {
                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        // optional bias tensors
                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, false);
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, false);
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, false);
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, false);

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd}, false);

                        if (layer.ffn_gate_inp == nullptr) {
                            GGML_ASSERT(hparams.n_expert == 0);
                            GGML_ASSERT(hparams.n_expert_used == 0);

                            layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                            layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                            layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        } else {
                            GGML_ASSERT(hparams.n_expert > 0);
                            GGML_ASSERT(hparams.n_expert_used > 0);

                            // MoE branch
                            for (uint32_t x = 0; x < hparams.n_expert; ++x) {
                                layer.ffn_gate_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), {n_embd, n_ff});
                                layer.ffn_down_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd});
                                layer.ffn_up_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), {n_embd, n_ff});
                            }
                        }
                    }
                } break;
            case LLM_ARCH_BAICHUAN:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }
                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_FALCON:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_OUTPUT, "weight").c_str()) >= 0) {
                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                        } else {
                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // needs to be on GPU
                            ml.n_created--; // artificial tensor
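                            // (tied embeddings: the output head reuses tok_embd here, so the
                            //  created-tensor count is decremented to keep the loader's
                            //  accounting consistent with the number of tensors in the file)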
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) {
                            layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd});
                            layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd});
                        }

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_STARCODER:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                    }
                } break;
            case LLM_ARCH_PERSIMMON:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }
                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
                        layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64});
                        layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64});
                        layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64});
                        layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64});
                    }
                } break;
            case LLM_ARCH_BERT:
            case LLM_ARCH_NOMIC_BERT:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type});
                    if (model.arch == LLM_ARCH_BERT) {
                        model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
                    }

                    model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
                    model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        if (model.arch == LLM_ARCH_BERT) {
                            layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                            layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});

                            layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                            layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});

                            layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                            layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
                        } else {
                            layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        }

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
                        layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});

                        if (model.arch == LLM_ARCH_BERT) {
                            layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
                            layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                            layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
                        } else {
                            layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        }

                        layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
                        layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd});
                    }
                } break;
            case LLM_ARCH_BLOOM:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
                    model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                    }
                } break;
            case LLM_ARCH_MPT:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});

                        // AWQ ScaleActivation layer
                        layer.ffn_act = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, false);
                    }
                } break;
            case LLM_ARCH_STABLELM:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        // optional bias tensors, present in Stable LM 2 1.6B
                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, false);
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, false);
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, false);

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_QWEN:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2});
                    }
                } break;
            case LLM_ARCH_QWEN2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        // optional bias tensors
                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_PHI2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                        model.output_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, false);
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, false);

                        if (layer.wqkv == nullptr) {
                            layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                            layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});

                            layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                            layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});

                            layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                            layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
                        }

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                    }
                } break;
            case LLM_ARCH_PLAMO:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_GPT2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                    }
                } break;
            case LLM_ARCH_CODESHELL:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                    }
                } break;
            case LLM_ARCH_ORION:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }
                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_INTERNLM2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        // layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            default:
                throw std::runtime_error("unknown architecture");
        }
    }
    ml.done_getting_tensors();

    ml.init_mapping(true, use_mlock ? &model.mlock_mmap : nullptr);

    // create the backend buffers
    std::vector<std::pair<ggml_context *, ggml_backend_buffer_t>> ctx_bufs;

    for (auto & it : ctx_map) {
        ggml_backend_buffer_type_t buft = it.first;
        ggml_context * ctx = it.second;
        ggml_backend_buffer_t buf = nullptr;

        // only the mmap region containing the tensors in the model is mapped to the backend buffer
        // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
        // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
        if (ml.use_mmap && buft == llama_default_buffer_type_cpu(true)) {
            size_t first, last;
            ml.get_mapping_range(&first, &last, ctx);
            buf = ggml_backend_cpu_buffer_from_ptr((char *) ml.mapping->addr + first, last - first);
        }
#ifdef GGML_USE_METAL
        else if (ml.use_mmap && buft == ggml_backend_metal_buffer_type()) {
            const size_t max_size = ggml_get_max_tensor_size(ctx);
            size_t first, last;
            ml.get_mapping_range(&first, &last, ctx);
            buf = ggml_backend_metal_buffer_from_ptr((char *) ml.mapping->addr + first, last - first, max_size);
        }
#endif
        else {
            buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
            if (buf != nullptr && use_mlock && ggml_backend_buffer_is_host(buf)) {
                model.mlock_bufs.emplace_back(new llama_mlock);
                auto & mlock_buf = model.mlock_bufs.back();
                mlock_buf->init   (ggml_backend_buffer_get_base(buf));
                mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
            }
        }
        if (buf == nullptr) {
            throw std::runtime_error("failed to allocate buffer");
        }
        // indicate that this buffer contains weights
        // this is used by ggml_backend_sched to improve op scheduling -> ops that use a weight are preferably scheduled to the backend that contains the weight
        ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
        model.bufs.push_back(buf);
        ctx_bufs.emplace_back(ctx, buf);
    }

    if (llama_supports_gpu_offload()) {
        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));

        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
        if (n_gpu_layers > (int) hparams.n_layer) {
            LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
        }
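
        // +1: the non-repeating output layer counts as one extra offloadable layer
        // on top of the n_layer repeating transformer blocks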
  3775. const int max_backend_supported_layers = hparams.n_layer + 1;
  3776. const int max_offloadable_layers = hparams.n_layer + 1;
  3777. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
  3778. }
  3779. // print memory requirements
  3780. for (ggml_backend_buffer_t buf : model.bufs) {
  3781. LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
  3782. }
  3783. // populate tensors_by_name
  3784. for (ggml_context * ctx : model.ctxs) {
  3785. for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
  3786. model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
  3787. }
  3788. }
  3789. // load tensor data
  3790. for (auto & it : ctx_bufs) {
  3791. ggml_context * ctx = it.first;
  3792. ggml_backend_buffer_t buf = it.second;
  3793. if (!ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf, use_mlock ? &model.mlock_mmap : NULL)) {
  3794. return false;
  3795. }
  3796. }
  3797. model.mapping = std::move(ml.mapping);
  3798. // loading time will be recalculate after the first eval, so
  3799. // we take page faults deferred by mmap() into consideration
  3800. model.t_load_us = ggml_time_us() - model.t_start_us;
  3801. return true;
  3802. }

// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
    try {
        llama_model_loader ml(fname, params.use_mmap, params.kv_overrides);

        model.hparams.vocab_only = params.vocab_only;

        try {
            llm_load_arch(ml, model);
        } catch(const std::exception & e) {
            throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
        }
        try {
            llm_load_hparams(ml, model);
        } catch(const std::exception & e) {
            throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
        }
        try {
            llm_load_vocab(ml, model);
        } catch(const std::exception & e) {
            throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
        }

        llm_load_print_meta(ml, model);

        if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
            throw std::runtime_error("vocab size mismatch");
        }

        if (params.vocab_only) {
            LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
            return 0;
        }

#ifdef GGML_USE_KOMPUTE
        if (params.n_gpu_layers > 0 && (
            !(model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON)
            || !(
                model.ftype == LLAMA_FTYPE_ALL_F32 ||
                model.ftype == LLAMA_FTYPE_MOSTLY_F16 ||
                model.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
                model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1
            )
        )) {
            // TODO(cebtenzzre): propagate this error outside of llama_load_model_from_file
            LLAMA_LOG_WARN("%s: disabling Kompute due to unsupported model arch or quantization\n", __func__);
            params.n_gpu_layers = 0;
        }
#endif

        if (!llm_load_tensors(
            ml, model, params.n_gpu_layers, params.split_mode, params.main_gpu, params.tensor_split, params.use_mlock,
            params.progress_callback, params.progress_callback_user_data
        )) {
            return -2;
        }
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
        return -1;
    }

    return 0;
}

//
// llm_build
//
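
// callback used while building the graph: called on every intermediate
// tensor so the caller can assign names and the owning layer index
// (nl == -1 for tensors that do not belong to a specific layer)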
using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;

enum llm_rope_type {
    LLM_ROPE,
    LLM_ROPE_NEOX,
    LLM_ROPE_GLM,
};

enum llm_ffn_op_type {
    LLM_FFN_SILU,
    LLM_FFN_GELU,
    LLM_FFN_RELU,
    LLM_FFN_RELU_SQR,
};

enum llm_ffn_gate_type {
    LLM_FFN_SEQ,
    LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
};

enum llm_norm_type {
    LLM_NORM,
    LLM_NORM_RMS,
};
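
// build the graph input embeddings: either a token-embedding lookup
// (when the batch carries token ids) or a view of pre-computed embeddings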
static struct ggml_tensor * llm_build_inp_embd(
        struct ggml_context * ctx,
        const llama_hparams & hparams,
          const llama_batch & batch,
         struct ggml_tensor * tok_embd,
         struct ggml_tensor * inp_tokens,
         struct ggml_tensor * inp_embd,
         const llm_build_cb & cb) {
    const int64_t n_embd = hparams.n_embd;

    struct ggml_tensor * inpL;

    if (batch.token) {
        struct ggml_tensor * inp_tokens_v = ggml_view_1d(ctx, inp_tokens, batch.n_tokens, 0);
        cb(inp_tokens, "inp_tokens", -1);

        inpL = ggml_get_rows(ctx, tok_embd, inp_tokens_v);
    } else {
#ifdef GGML_USE_MPI
        GGML_ASSERT(false && "not implemented");
#endif

        inpL = ggml_view_2d(ctx, inp_embd, n_embd, batch.n_tokens, inp_embd->nb[1], 0);
    }

    return inpL;
}
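
// re-rotate all K vectors already stored in the cache by K_shift; used when
// the KV cache content is moved (kv_self.has_shift), so the cached keys stay
// consistent with their new positions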
// Persimmon: n_rot = n_embd_head_k/2
// Other:     n_rot = n_embd_head_k
static void llm_build_k_shift(
      struct ggml_context * ctx,
      const llama_hparams & hparams,
      const llama_cparams & cparams,
     const llama_kv_cache & kv,
       struct ggml_cgraph * graph,
       struct ggml_tensor * K_shift,
            llm_rope_type   type,
                  int64_t   n_ctx,
                    float   freq_base,
                    float   freq_scale,
       const llm_build_cb & cb) {
    const int64_t n_layer       = hparams.n_layer;
    const int64_t n_head_kv     = hparams.n_head_kv;
    const int64_t n_embd_head_k = hparams.n_embd_head_k;
    const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa();
    const int32_t n_rot         = hparams.n_rot;
    const int32_t n_orig_ctx    = cparams.n_yarn_orig_ctx;
    const float   ext_factor    = cparams.yarn_ext_factor;
    const float   attn_factor   = cparams.yarn_attn_factor;
    const float   beta_fast     = cparams.yarn_beta_fast;
    const float   beta_slow     = cparams.yarn_beta_slow;

    int rope_type = 0;

    switch (type) {
        case LLM_ROPE:      rope_type = 0; break;
        case LLM_ROPE_NEOX: rope_type = 2; break;
        case LLM_ROPE_GLM:  rope_type = 4; break;
    }

    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * tmp =
            // we rotate only the first n_rot dimensions
            ggml_rope_custom_inplace(ctx,
                    ggml_view_3d(ctx, kv.k_l[il],
                        n_embd_head_k, n_head_kv, n_ctx,
                        ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
                        ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
                        0),
                    K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow);
        cb(tmp, "K_shifted", il);
        ggml_build_forward_expand(graph, tmp);
    }
}
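
// store the K/V of the current batch in the cache at offset kv_head;
// K is written as-is (already RoPE-ed), V is written transposed so that
// rows can be read contiguously per head during attention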
static void llm_build_kv_store(
        struct ggml_context * ctx,
        const llama_hparams & hparams,
       const llama_kv_cache & kv,
         struct ggml_cgraph * graph,
         struct ggml_tensor * k_cur,
         struct ggml_tensor * v_cur,
                    int64_t   n_ctx,
                    int32_t   n_tokens,
                    int32_t   kv_head,
         const llm_build_cb & cb,
                    int64_t   il) {
    const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
    const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();

    // compute the transposed [n_tokens, n_embd] V matrix
    struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, n_embd_v_gqa, n_tokens));
    //struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed
    cb(v_cur_t, "v_cur_t", il);

    struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa,
            (ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa))*kv_head);
    cb(k_cache_view, "k_cache_view", il);

    struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa,
            (  n_ctx)*ggml_element_size(kv.v_l[il]),
            (kv_head)*ggml_element_size(kv.v_l[il]));
    cb(v_cache_view, "v_cache_view", il);

    // important: storing RoPE-ed version of K in the KV cache!
    ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur,   k_cache_view));
    ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view));
}
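
// normalization helper: LLM_NORM is standard layernorm, LLM_NORM_RMS is
// RMSNorm, y = x / sqrt(mean(x^2) + eps), optionally scaled by mw and
// shifted by mb when those tensors are present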
static struct ggml_tensor * llm_build_norm(
        struct ggml_context * ctx,
         struct ggml_tensor * cur,
        const llama_hparams & hparams,
         struct ggml_tensor * mw,
         struct ggml_tensor * mb,
              llm_norm_type   type,
         const llm_build_cb & cb,
                        int   il) {
    switch (type) {
        case LLM_NORM:     cur = ggml_norm    (ctx, cur, hparams.f_norm_eps);     break;
        case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
    }

    if (mw || mb) {
        cb(cur, "norm", il);
    }

    if (mw) {
        cur = ggml_mul(ctx, cur, mw);
        if (mb) {
            cb(cur, "norm_w", il);
        }
    }

    if (mb) {
        cur = ggml_add(ctx, cur, mb);
    }

    return cur;
}
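
// feed-forward helper; with a gate tensor the result is
//   LLM_FFN_PAR: down(act(gate(x)) * up(x))  -- gate applied to the block input
//   LLM_FFN_SEQ: down(act(gate(up(x))))      -- gate applied after up
// and down(act(up(x))) when no gate is given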
static struct ggml_tensor * llm_build_ffn(
        struct ggml_context * ctx,
         struct ggml_tensor * cur,
         struct ggml_tensor * up,
         struct ggml_tensor * up_b,
         struct ggml_tensor * gate,
         struct ggml_tensor * gate_b,
         struct ggml_tensor * down,
         struct ggml_tensor * down_b,
         struct ggml_tensor * act_scales,
            llm_ffn_op_type   type_op,
          llm_ffn_gate_type   type_gate,
         const llm_build_cb & cb,
                        int   il) {
    struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur);
    cb(tmp, "ffn_up", il);

    if (up_b) {
        tmp = ggml_add(ctx, tmp, up_b);
        cb(tmp, "ffn_up_b", il);
    }

    if (gate) {
        switch (type_gate) {
            case LLM_FFN_SEQ:
                {
                    cur = ggml_mul_mat(ctx, gate, tmp);
                    cb(cur, "ffn_gate", il);
                } break;
            case LLM_FFN_PAR:
                {
                    cur = ggml_mul_mat(ctx, gate, cur);
                    cb(cur, "ffn_gate", il);
                } break;
        }

        if (gate_b) {
            cur = ggml_add(ctx, cur, gate_b);
            cb(cur, "ffn_gate_b", il);
        }
    } else {
        cur = tmp;
    }

    switch (type_op) {
        case LLM_FFN_SILU:
            {
                cur = ggml_silu(ctx, cur);
                cb(cur, "ffn_silu", il);
            } break;
        case LLM_FFN_GELU:
            {
                cur = ggml_gelu(ctx, cur);
                cb(cur, "ffn_gelu", il);
                if (act_scales != NULL) {
                    cur = ggml_div(ctx, cur, act_scales);
                    cb(cur, "ffn_act", il);
                }
            } break;
        case LLM_FFN_RELU:
            {
                cur = ggml_relu(ctx, cur);
                cb(cur, "ffn_relu", il);
            } break;
        case LLM_FFN_RELU_SQR:
            {
                cur = ggml_relu(ctx, cur);
                cb(cur, "ffn_relu", il);

                cur = ggml_sqr(ctx, cur);
                cb(cur, "ffn_sqr(relu)", il);
            } break;
    }

    if (type_gate == LLM_FFN_PAR) {
        cur = ggml_mul(ctx, cur, tmp);
        cb(cur, "ffn_gate_par", il);
    }

    cur = ggml_mul_mat(ctx, down, cur);
    if (down_b) {
        cb(cur, "ffn_down", il);
    }

    if (down_b) {
        cur = ggml_add(ctx, cur, down_b);
    }

    return cur;
}
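
// compute the attention output for one layer from the cached K/V, roughly
//   out = wo * concat_heads(softmax(kq_scale*K*Q^T + mask) * V)
// kq_pos carries the per-position ALiBi bias when the model uses ALiBi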
// if max_alibi_bias > 0 then apply ALiBi
static struct ggml_tensor * llm_build_kqv(
        struct ggml_context * ctx,
          const llama_model & model,
        const llama_hparams & hparams,
       const llama_kv_cache & kv,
         struct ggml_cgraph * graph,
         struct ggml_tensor * wo,
         struct ggml_tensor * wo_b,
         struct ggml_tensor * q_cur,
         struct ggml_tensor * kq_mask,
         struct ggml_tensor * kq_pos,
                    int64_t   n_ctx,
                    int32_t   n_tokens,
                    int32_t   n_kv,
                      float   kq_scale,
         const llm_build_cb & cb,
                        int   il) {
    const int64_t n_head        = hparams.n_head;
    const int64_t n_head_kv     = hparams.n_head_kv;
    const int64_t n_embd_head_k = hparams.n_embd_head_k;
    const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa();
    const int64_t n_embd_head_v = hparams.n_embd_head_v;

    struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
    cb(q, "q", il);

    struct ggml_tensor * k =
        ggml_view_3d(ctx, kv.k_l[il],
                n_embd_head_k, n_kv, n_head_kv,
                ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
                ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
                0);
    cb(k, "k", il);

    struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
    cb(kq, "kq", il);

    if (model.arch == LLM_ARCH_PHI2) {
        // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
        // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
    }

#if defined(GGML_USE_VULKAN) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_SYCL)
#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Vulkan, Kompute, and SYCL")
#pragma message("      Falling back to ggml_alibi(). Will become an error in Mar 2024")
#pragma message("ref:  https://github.com/ggerganov/llama.cpp/pull/5488")
    if (hparams.f_max_alibi_bias > 0.0f) {
        kq = ggml_scale(ctx, kq, kq_scale);
        cb(kq, "kq_scaled", il);

        kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, hparams.f_max_alibi_bias);
        cb(kq, "kq_scaled_alibi", il);

        kq = ggml_add(ctx, kq, kq_mask);
        cb(kq, "kq_masked", il);

        kq = ggml_soft_max(ctx, kq);
        cb(kq, "kq_soft_max", il);
    } else
#endif
    {
        kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_pos, kq_scale, hparams.f_max_alibi_bias);
        cb(kq, "kq_soft_max_ext", il);
    }

    // split cached v into n_head heads
    struct ggml_tensor * v =
        ggml_view_3d(ctx, kv.v_l[il],
                n_kv, n_embd_head_v, n_head_kv,
                ggml_element_size(kv.v_l[il])*n_ctx,
                ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v,
                0);
    cb(v, "v", il);

    struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
    cb(kqv, "kqv", il);

    struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
    cb(kqv_merged, "kqv_merged", il);

    struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_k*n_head, n_tokens);
    cb(cur, "kqv_merged_cont", il);

    ggml_build_forward_expand(graph, cur);

    cur = ggml_mul_mat(ctx, wo, cur);
    if (wo_b) {
        cb(cur, "kqv_wo", il);
    }

    if (wo_b) {
        cur = ggml_add(ctx, cur, wo_b);
    }

    return cur;
}
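
// convenience wrapper: store the current batch K/V into the cache, then
// compute the attention output with llm_build_kqv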
static struct ggml_tensor * llm_build_kv(
        struct ggml_context * ctx,
          const llama_model & model,
        const llama_hparams & hparams,
       const llama_kv_cache & kv,
         struct ggml_cgraph * graph,
         struct ggml_tensor * wo,
         struct ggml_tensor * wo_b,
         struct ggml_tensor * k_cur,
         struct ggml_tensor * v_cur,
         struct ggml_tensor * q_cur,
         struct ggml_tensor * kq_mask,
         struct ggml_tensor * kq_pos,
                    int64_t   n_ctx,
                    int32_t   n_tokens,
                    int32_t   kv_head,
                    int32_t   n_kv,
                      float   kq_scale,
         const llm_build_cb & cb,
                        int   il) {

    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(graph, q_cur);
    ggml_build_forward_expand(graph, k_cur);
    ggml_build_forward_expand(graph, v_cur);

    llm_build_kv_store(ctx, hparams, kv, graph, k_cur, v_cur, n_ctx, n_tokens, kv_head, cb, il);

    struct ggml_tensor * cur;

    cur = llm_build_kqv(ctx, model, hparams, kv, graph, wo, wo_b,
            q_cur, kq_mask, kq_pos, n_ctx, n_tokens, n_kv, kq_scale, cb, il);
    cb(cur, "kqv_out", il);

    return cur;
}
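
// gathers everything needed to build the compute graph for one batch;
// a fresh instance is constructed per graph build, with the model/context
// parameters captured as const members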
struct llm_build_context {
    const llama_model    & model;
    const llama_context  & lctx;
    const llama_hparams  & hparams;
    const llama_cparams  & cparams;
    const llama_batch    & batch;
    const llama_kv_cache & kv_self;

    const int64_t n_embd;
    const int64_t n_layer;
    const int64_t n_ctx;       // user-specified context size (can be different from n_ctx_train)
    const int64_t n_head;
    const int64_t n_head_kv;
    const int64_t n_embd_head_k;
    const int64_t n_embd_k_gqa;
    const int64_t n_embd_head_v;
    const int64_t n_embd_v_gqa;
    const int64_t n_expert;
    const int64_t n_expert_used;

    const float freq_base;
    const float freq_scale;
    const float ext_factor;
    const float attn_factor;
    const float beta_fast;
    const float beta_slow;
    const float norm_eps;
    const float norm_rms_eps;

    const int32_t n_tokens;
    const int32_t n_kv;     // size of KV cache to consider (n_kv <= n_ctx)
    const int32_t kv_head;  // index of where we store new KV data in the cache
    const int32_t n_orig_ctx;

    const bool do_rope_shift;

    const uint32_t pooling_type;

    const llm_build_cb & cb;

    std::vector<uint8_t> & buf_compute_meta;

    struct ggml_context * ctx0 = nullptr;

    // TODO: consider making the entire interface noexcept
    llm_build_context(
        llama_context  & lctx,
    const llama_batch  & batch,
    const llm_build_cb & cb,
                  bool   worst_case) :
        model            (lctx.model),
        lctx             (lctx),
        hparams          (model.hparams),
        cparams          (lctx.cparams),
        batch            (batch),
        kv_self          (lctx.kv_self),
        n_embd           (hparams.n_embd),
        n_layer          (hparams.n_layer),
        n_ctx            (cparams.n_ctx),
        n_head           (hparams.n_head),
        n_head_kv        (hparams.n_head_kv),
        n_embd_head_k    (hparams.n_embd_head_k),
        n_embd_k_gqa     (hparams.n_embd_k_gqa()),
        n_embd_head_v    (hparams.n_embd_head_v),
        n_embd_v_gqa     (hparams.n_embd_v_gqa()),
        n_expert         (hparams.n_expert),
        n_expert_used    (hparams.n_expert_used),
        freq_base        (cparams.rope_freq_base),
        freq_scale       (cparams.rope_freq_scale),
        ext_factor       (cparams.yarn_ext_factor),
        attn_factor      (cparams.yarn_attn_factor),
        beta_fast        (cparams.yarn_beta_fast),
        beta_slow        (cparams.yarn_beta_slow),
        norm_eps         (hparams.f_norm_eps),
        norm_rms_eps     (hparams.f_norm_rms_eps),
        n_tokens         (batch.n_tokens),
        n_kv             (worst_case ? n_ctx            : kv_self.n),
        kv_head          (worst_case ? n_ctx - n_tokens : kv_self.head),
        n_orig_ctx       (cparams.n_yarn_orig_ctx),
        do_rope_shift    (worst_case || kv_self.has_shift),
        pooling_type     (cparams.do_pooling ? hparams.pooling_type : (uint32_t)LLAMA_POOLING_NONE),
        cb               (cb),
        buf_compute_meta (lctx.buf_compute_meta) {
            // all initializations should be done in init()
        }

    void init() {
        struct ggml_init_params params = {
            /*.mem_size   =*/ buf_compute_meta.size(),
            /*.mem_buffer =*/ buf_compute_meta.data(),
            /*.no_alloc   =*/ true,
        };

        ctx0 = ggml_init(params);
    }

    void free() {
        if (ctx0) {
            ggml_free(ctx0);
            ctx0 = nullptr;
        }
    }
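
    // classic LLaMA block: RMSNorm -> RoPE self-attention -> residual ->
    // RMSNorm -> SiLU-gated FFN (or MoE when ffn_gate_inp is present) -> residual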
    struct ggml_cgraph * build_llama() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            if (model.layers[il].ffn_gate_inp == nullptr) {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            } else {
                // MoE branch
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);
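
                // Mixtral-style routing: softmax over the expert logits,
                // keep the top n_expert_used experts, renormalize their
                // weights, then sum the weighted expert FFN outputs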
                ggml_tensor * logits = ggml_mul_mat(ctx0, model.layers[il].ffn_gate_inp, cur); // [n_tokens, num_experts]
                cb(logits, "ffn_moe_logits", il);

                ggml_tensor * probs = ggml_soft_max(ctx0, logits); // [n_tokens, num_experts]
                cb(probs, "ffn_moe_probs", il);

                // select experts
                ggml_tensor * selected_experts = ggml_top_k(ctx0, probs, n_expert_used); // [n_tokens, num_experts_per_tok]
                cb(selected_experts->src[0], "ffn_moe_argsort", il);

                ggml_tensor * weights = ggml_get_rows(ctx0,
                        ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts);
                cb(weights, "ffn_moe_weights", il);

                weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); // [n_tokens, num_experts_per_tok]

                ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights);
                cb(weights_sum, "ffn_moe_weights_sum", il);

                weights = ggml_div(ctx0, weights, weights_sum); // [n_tokens, num_experts_per_tok]
                cb(weights, "ffn_moe_weights_norm", il);

                // compute expert outputs
                ggml_tensor * moe_out = nullptr;

                for (int i = 0; i < n_expert_used; ++i) {
                    ggml_tensor * cur_expert;

                    ggml_tensor * cur_up = ggml_mul_mat_id(ctx0, model.layers[il].ffn_up_exp, n_expert, selected_experts, i, cur);
                    cb(cur_up, "ffn_moe_up", il);

                    ggml_tensor * cur_gate = ggml_mul_mat_id(ctx0, model.layers[il].ffn_gate_exp, n_expert, selected_experts, i, cur);
                    cb(cur_gate, "ffn_moe_gate", il);

                    cur_gate = ggml_silu(ctx0, cur_gate);
                    cb(cur_gate, "ffn_moe_silu", il);

                    cur_expert = ggml_mul(ctx0, cur_up, cur_gate); // [n_tokens, n_embd]
                    cb(cur_expert, "ffn_moe_gate_par", il);

                    cur_expert = ggml_mul_mat_id(ctx0, model.layers[il].ffn_down_exp, n_expert, selected_experts, i, cur_expert); // [n_tokens, n_embd]
                    cb(cur_expert, "ffn_moe_down", il);

                    cur_expert = ggml_mul(ctx0, cur_expert,
                            ggml_view_2d(ctx0, weights, 1, n_tokens, weights->nb[1], i*weights->nb[0]));
                    cb(cur_expert, "ffn_moe_weighted", il);

                    if (i == 0) {
                        moe_out = cur_expert;
                    } else {
                        moe_out = ggml_add(ctx0, moe_out, cur_expert);
                        cb(moe_out, "ffn_moe_out", il);
                    }
                }

                cur = moe_out;
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
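
    // Baichuan: LLaMA-style backbone; the 7B variant uses RoPE while the
    // 13B variant uses ALiBi (KQ_pos) without rotating Q/K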
    struct ggml_cgraph * build_baichuan() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // positions of the tokens in the KV cache
        struct ggml_tensor * KQ_pos = ggml_view_1d(ctx0, lctx.inp_KQ_pos, n_kv, 0);
        cb(KQ_pos, "KQ_pos", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);

                switch (model.type) {
                    case MODEL_7B:
                        Qcur = ggml_rope_custom(
                            ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                            hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
                            ext_factor, attn_factor, beta_fast, beta_slow
                        );
                        Kcur = ggml_rope_custom(
                            ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                            hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
                            ext_factor, attn_factor, beta_fast, beta_slow
                        );
                        break;
                    case MODEL_13B:
                        Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
                        Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
                        break;
                    default:
                        GGML_ASSERT(false);
                }
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
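
    // Falcon: NeoX-style RoPE on a fused QKV projection; the FFN runs in
    // parallel with attention on the same normed input (note that it reads
    // attn_norm, not the attention output)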
    struct ggml_cgraph * build_falcon() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * attn_norm;

            attn_norm = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(attn_norm, "attn_norm", il);

            // self-attention
            {
                if (model.layers[il].attn_norm_2) {
                    // Falcon-40B
                    cur = llm_build_norm(ctx0, inpL, hparams,
                            model.layers[il].attn_norm_2,
                            model.layers[il].attn_norm_2_b,
                            LLM_NORM, cb, il);
                    cb(cur, "attn_norm_2", il);
                } else {
                    cur = attn_norm;
                }

                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);

                // using mode = 2 for neox mode
                Qcur = ggml_rope_custom(
                    ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = cur;

            // feed forward
            {
                cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result
                        model.layers[il].ffn_up,   NULL,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            cur = ggml_add(ctx0, cur, inpL);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        // norm
        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
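
    // StarCoder: GPT-2-style blocks with learned absolute position
    // embeddings added to the token embeddings (no RoPE)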
    struct ggml_cgraph * build_starcoder() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * pos;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
        cb(pos, "pos_embd", -1);

        inpL = ggml_add(ctx0, inpL, pos);
        cb(inpL, "inpL", -1);

        for (int il = 0; il < n_layer; ++il) {
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            // add the input
            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
            cb(ffn_inp, "ffn_inp", il);

            // FF
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }

        cur = llm_build_norm(ctx0, inpL, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
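
    // Persimmon: fused QKV with Q/K layernorm and partial rotary embeddings -
    // only the first n_rot (= n_embd_head/2) dimensions of each head are
    // rotated, the rest are passed through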
    struct ggml_cgraph * build_persimmon() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head   == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head/2 == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * residual = inpL;

            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self attention
            {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);

                // split qkv
                GGML_ASSERT(n_head_kv == n_head);

                struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens);
                cb(tmpqkv, "tmpqkv", il);

                struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2));
                cb(tmpqkv_perm, "tmpqkv", il);

                struct ggml_tensor * tmpq = ggml_view_3d(
                        ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
                        ggml_element_size(tmpqkv_perm) * n_embd_head,
                        ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
                        0
                    );
                cb(tmpq, "tmpq", il);

                struct ggml_tensor * tmpk = ggml_view_3d(
                        ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
                        ggml_element_size(tmpqkv_perm) * n_embd_head,
                        ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
                        ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens
                    );
                cb(tmpk, "tmpk", il);

                // Q/K Layernorm
                tmpq = llm_build_norm(ctx0, tmpq, hparams,
                        model.layers[il].attn_q_norm,
                        model.layers[il].attn_q_norm_b,
                        LLM_NORM, cb, il);
                cb(tmpq, "tmpq", il);

                tmpk = llm_build_norm(ctx0, tmpk, hparams,
                        model.layers[il].attn_k_norm,
                        model.layers[il].attn_k_norm_b,
                        LLM_NORM, cb, il);
                cb(tmpk, "tmpk", il);

                // RoPE the first n_rot of q/k, pass the other half, and concat.
                struct ggml_tensor * qrot = ggml_view_3d(
                        ctx0, tmpq, hparams.n_rot, n_head, n_tokens,
                        ggml_element_size(tmpq) * n_embd_head,
                        ggml_element_size(tmpq) * n_embd_head * n_head,
                        0
                    );
                cb(qrot, "qrot", il);

                struct ggml_tensor * krot = ggml_view_3d(
                        ctx0, tmpk, hparams.n_rot, n_head, n_tokens,
                        ggml_element_size(tmpk) * n_embd_head,
                        ggml_element_size(tmpk) * n_embd_head * n_head,
                        0
                    );
                cb(krot, "krot", il);
                // get the second half of tmpq, e.g. tmpq[n_rot:, :, :]
                struct ggml_tensor * qpass = ggml_view_3d(
                        ctx0, tmpq, hparams.n_rot, n_head, n_tokens,
                        ggml_element_size(tmpq) * n_embd_head,
                        ggml_element_size(tmpq) * n_embd_head * n_head,
                        ggml_element_size(tmpq) * hparams.n_rot
                    );
                cb(qpass, "qpass", il);

                struct ggml_tensor * kpass = ggml_view_3d(
                        ctx0, tmpk, hparams.n_rot, n_head, n_tokens,
                        ggml_element_size(tmpk) * n_embd_head,
                        ggml_element_size(tmpk) * n_embd_head * n_head,
                        ggml_element_size(tmpk) * hparams.n_rot
                    );
                cb(kpass, "kpass", il);

                struct ggml_tensor * qrotated = ggml_rope_custom(
                        ctx0, qrot, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
                        freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                    );
                cb(qrotated, "qrotated", il);

                struct ggml_tensor * krotated = ggml_rope_custom(
                        ctx0, krot, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
                        freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                    );
                cb(krotated, "krotated", il);

                // ggml currently only supports concatenation on dim=2
                // so we need to permute qrot, qpass, concat, then permute back.
                qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3));
                cb(qrotated, "qrotated", il);

                krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3));
                cb(krotated, "krotated", il);

                qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3));
                cb(qpass, "qpass", il);

                kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3));
                cb(kpass, "kpass", il);

                struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass);
                cb(Qcur, "Qcur", il);

                struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
                cb(Kcur, "Kcur", il);

                struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3));
                cb(Q, "Q", il);

                Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
                cb(Kcur, "Kcur", il);

                struct ggml_tensor * Vcur = ggml_view_3d(
                        ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
                        ggml_element_size(tmpqkv_perm) * n_embd_head,
                        ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
                        ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2
                    );
                cb(Vcur, "Vcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Q, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
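
    // Refact: LLaMA-style blocks, but with ALiBi position bias (KQ_pos)
    // instead of RoPE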
    struct ggml_cgraph * build_refact() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // positions of the tokens in the KV cache
        struct ggml_tensor * KQ_pos = ggml_view_1d(ctx0, lctx.inp_KQ_pos, n_kv, 0);
        cb(KQ_pos, "KQ_pos", -1);

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);

                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                cb(Kcur, "Kcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
                cb(Qcur, "Qcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
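
    // BERT-style encoder: token + type (+ absolute position) embeddings with
    // an embedding layernorm, post-attention norms, and a final pooling step
    // (mean or CLS) that produces embeddings instead of logits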
    struct ggml_cgraph * build_bert() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        // get input vectors with right size
        const size_t stride1 = n_tokens * ggml_type_size(lctx.inp_tokens->type);

        struct ggml_tensor * inp_pos  = ggml_view_1d(ctx0, lctx.inp_pos,  n_tokens, 0);
        struct ggml_tensor * inp_mean = ggml_view_2d(ctx0, lctx.inp_mean, n_tokens, n_tokens, stride1, 0);
        struct ggml_tensor * inp_cls  = ggml_view_1d(ctx0, lctx.inp_cls,  n_tokens, 0);

        // construct input embeddings (token, type, position)
        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);

        // token types are hardcoded to zero ("Sentence A")
        struct ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
        inpL = ggml_add(ctx0, inpL, type_row0);
        if (model.arch == LLM_ARCH_BERT) {
            inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL);
        }
        cb(inpL, "inp_embd", -1);

        // embed layer norm
        inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, LLM_NORM, cb, -1);
        cb(inpL, "inp_norm", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1); // [n_kv, n_tokens]

        // iterate layers
        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * cur = inpL;

            // self-attention
            if (model.arch == LLM_ARCH_BERT) {
                struct ggml_tensor * Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), model.layers[il].bq);
                cb(Qcur, "Qcur", il);

                struct ggml_tensor * Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), model.layers[il].bk);
                cb(Kcur, "Kcur", il);

                struct ggml_tensor * Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, cur), model.layers[il].bv);
                cb(Vcur, "Vcur", il);

                // seems like we just need to do this for Q?
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            } else {
                // compute Q and K and RoPE them
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            // re-add the layer input
            cur = ggml_add(ctx0, cur, inpL);

            // attention layer norm
            cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);

            struct ggml_tensor * ffn_inp = cur;
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            if (model.arch == LLM_ARCH_BERT) {
                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
            } else {
                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            }
            cb(cur, "ffn_out", il);

            // attentions bypass the intermediate layer
            cur = ggml_add(ctx0, cur, ffn_inp);

            // output layer norm
            cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, cb, il);

            // input for next layer
            inpL = cur;
        }

        // final output
        cur = inpL;

        // pooling layer
        if (pooling_type == LLAMA_POOLING_MEAN) {
            cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, cur)), inp_mean);
        } else if (pooling_type == LLAMA_POOLING_CLS) {
            cur = ggml_get_rows(ctx0, cur, inp_cls);
        } else {
            GGML_ASSERT(pooling_type == LLAMA_POOLING_NONE && "Invalid pooling type");
        }
        cb(cur, "result_embd", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
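
    // BLOOM: layernorm on the token embeddings (tok_norm), fused QKV with
    // bias, and ALiBi position bias via KQ_pos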
    struct ggml_cgraph * build_bloom() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // positions of the tokens in the KV cache
        struct ggml_tensor * KQ_pos = ggml_view_1d(ctx0, lctx.inp_KQ_pos, n_kv, 0);
        cb(KQ_pos, "KQ_pos", -1);

        inpL = llm_build_norm(ctx0, inpL, hparams,
                model.tok_norm,
                model.tok_norm_b,
                LLM_NORM, cb, -1);
        cb(inpL, "inp_norm", -1);

        for (int il = 0; il < n_layer; ++il) {
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            // Add the input
            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
            cb(ffn_inp, "ffn_inp", il);

            // FF
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }

        cur = llm_build_norm(ctx0, inpL, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
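
    // MPT: ALiBi attention with optional QKV clamping (f_clamp_kqv) and
    // optional activation scaling in the FFN (ffn_act)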
    struct ggml_cgraph * build_mpt() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // positions of the tokens in the KV cache
        struct ggml_tensor * KQ_pos = ggml_view_1d(ctx0, lctx.inp_KQ_pos, n_kv, 0);
        cb(KQ_pos, "KQ_pos", -1);

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * attn_norm;

            attn_norm = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    NULL,
                    LLM_NORM, cb, il);
            cb(attn_norm, "attn_norm", il);

            // self-attention
            {
                cur = attn_norm;

                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                if (hparams.f_clamp_kqv > 0.0f) {
                    cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
                    cb(cur, "wqkv_clamped", il);
                }

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            // Add the input
            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
            cb(ffn_inp, "ffn_inp", il);

            // feed forward
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        NULL,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, NULL,
                        model.layers[il].ffn_act,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm,
                NULL,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_stablelm() {
        struct ggml_cgraph * gf = ggml_new_graph(ctx0);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
        }
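
        // note: when KV cells have been moved (e.g. after context shifting), the cached K
        // values no longer match their new positions; llm_build_k_shift re-ropes them using
        // the per-cell deltas uploaded via lctx.inp_K_shift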
        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_qwen() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);

                // using mode = 2 for neox mode
                Qcur = ggml_rope_custom(
                    ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);
            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_qwen2() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);

                // these nodes are added to the graph together so that they are not reordered
                // by doing so, the number of splits in the graph is reduced
                ggml_build_forward_expand(gf, Qcur);
                ggml_build_forward_expand(gf, Kcur);
                ggml_build_forward_expand(gf, Vcur);

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "ffn_norm", il);

            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up,   NULL,
                    model.layers[il].ffn_gate, NULL,
                    model.layers[il].ffn_down, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_phi2() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * attn_norm_output;
        struct ggml_tensor * ffn_output;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE_NEOX, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(attn_norm_output, "attn_norm", il);

            // self-attention
            {
                struct ggml_tensor * Qcur = nullptr;
                struct ggml_tensor * Kcur = nullptr;
                struct ggml_tensor * Vcur = nullptr;

                if (model.layers[il].wqkv) {
                    cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
                    cb(cur, "wqkv", il);

                    cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                    cb(cur, "bqkv", il);

                    Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                    Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                    Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
                } else {
                    Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq);
                    Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk);
                    Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv);
                }

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);

                Qcur = ggml_rope_custom(
                    ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                // with phi2, we scale the Q to avoid precision issues
                // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
                Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);
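
                // note: the 1/sqrt(n_embd_head) attention scale was already folded into Qcur
                // above, so llm_build_kv is invoked with a KQ scale of 1.0f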
                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f, cb, il);
                cb(cur, "kqv_out", il);
            }

            // FF
            {
                ffn_output = llm_build_ffn(ctx0, attn_norm_output,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(ffn_output, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_output);
            cb(cur, "l_out", il);

            cur = ggml_add(ctx0, cur, inpL);
            cb(cur, "l_out", il);
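
            // note: phi2 uses a parallel residual layout - the attention and FFN branches
            // both read attn_norm_output, and their outputs are summed with the layer input
            // inpL rather than being applied sequentially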
            inpL = cur;
        }

        cur = llm_build_norm(ctx0, inpL, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output_no_bias", -1);

        cur = ggml_add(ctx0, cur, model.output_b);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_plamo() {
        struct ggml_cgraph * gf = ggml_new_graph(ctx0);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            struct ggml_tensor * attention_norm = cur;

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, hparams.n_rot, n_head, n_tokens), inp_pos,
                    n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow);
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, hparams.n_rot, n_head_kv, n_tokens), inp_pos,
                    n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow);
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }
            struct ggml_tensor * sa_out = cur;

            cur = attention_norm;
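
            // note: plamo is similarly parallel - the FFN below consumes the same normalized
            // activations as attention (attention_norm), with sa_out and the raw layer input
            // inpL added back afterwards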
            // feed-forward network
            {
                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, sa_out);
            cb(cur, "l_out", il);

            cur = ggml_add(ctx0, cur, inpL);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_gpt2() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * pos;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
        cb(pos, "pos_embd", -1);

        inpL = ggml_add(ctx0, inpL, pos);
        cb(inpL, "inpL", -1);
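
        // note: gpt2 uses learned absolute position embeddings (model.pos_embd), added to the
        // token embeddings once here, so this graph has no RoPE and no K-cache shift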
        for (int il = 0; il < n_layer; ++il) {
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            // add the input
            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
            cb(ffn_inp, "ffn_inp", il);

            // FF
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }

        cur = llm_build_norm(ctx0, inpL, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_codeshell() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);

                struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
                cb(tmpq, "tmpq", il);
                cb(tmpk, "tmpk", il);
                cb(Vcur, "Vcur", il);

                struct ggml_tensor * Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                struct ggml_tensor * Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            // add the input
            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
            cb(ffn_inp, "ffn_inp", il);

            // FF
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }

        cur = llm_build_norm(ctx0, inpL, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_orion() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                // if (model.layers[il].bq) {
                //     Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                //     cb(Qcur, "Qcur", il);
                // }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                // if (model.layers[il].bk) {
                //     Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                //     cb(Kcur, "Kcur", il);
                // }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                // if (model.layers[il].bv) {
                //     Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                //     cb(Vcur, "Vcur", il);
                // }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "ffn_norm", il);

            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up,   NULL,
                    model.layers[il].ffn_gate, NULL,
                    model.layers[il].ffn_down, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_internlm2() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "ffn_norm", il);

            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up,   NULL,
                    model.layers[il].ffn_gate, NULL,
                    model.layers[il].ffn_down, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    // ref: https://arxiv.org/abs/2203.03466
    //      https://github.com/ggerganov/llama.cpp/issues/5276#issuecomment-1925774738
    // based on the original build_llama() function
    struct ggml_cgraph * build_minicpm() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        const int64_t n_embd = hparams.n_embd;

        // TODO: if the model varies, these parameters need to be read from the model
        const int64_t n_embd_base = 256;
        const float   scale_embd  = 12.0f;
        const float   scale_depth = 1.4f;

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
        cb(inpL, "inp_embd", -1);

        // scale the input embeddings
        inpL = ggml_scale(ctx0, inpL, scale_embd);
        cb(inpL, "inp_scaled", -1);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
        cb(inp_pos, "inp_pos", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
        cb(KQ_mask, "KQ_mask", -1);

        // shift the entire K-cache if needed
        if (do_rope_shift) {
            llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, lctx.inp_K_shift, LLM_ROPE, n_ctx, freq_base, freq_scale, cb);
        }

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    hparams.n_rot, 0, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                cb(cur, "kqv_out", il);
            }

            // scale_res - scale the hidden states for residual connection
            const float scale_res = scale_depth/sqrtf(float(n_layer));
            cur = ggml_scale(ctx0, cur, scale_res);
            cb(cur, "hidden_scaled", -1);
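
            // note: damping each residual branch by scale_depth/sqrt(n_layer) appears to
            // follow the depth-scaling recipe of the muP reference linked above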
            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            // scale the hidden states for residual connection
            cur = ggml_scale(ctx0, cur, scale_res);
            cb(cur, "hidden_scaled_ffn", -1);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head scaling
        const float scale_lmhead = float(n_embd_base)/float(n_embd);
        cur = ggml_scale(ctx0, cur, scale_lmhead);
        cb(cur, "lmhead_scaling", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
};
static struct ggml_cgraph * llama_build_graph(
        llama_context & lctx,
        const llama_batch & batch,
        bool worst_case) {
    const auto & model = lctx.model;

    // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
    llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
        if (il >= 0) {
            ggml_format_name(cur, "%s-%d", name, il);
        } else {
            ggml_set_name(cur, name);
        }

        if (!lctx.cparams.offload_kqv) {
            if (strcmp(name, "kqv_merged_cont") == 0) {
                // all nodes between the KV store and the attention output are run on the CPU
                ggml_backend_sched_set_node_backend(lctx.sched, cur, lctx.backend_cpu);
            }
        }
    };

    struct ggml_cgraph * result = NULL;

    struct llm_build_context llm(lctx, batch, cb, worst_case);

    llm.init();

    switch (model.arch) {
        case LLM_ARCH_LLAMA:
            {
                result = llm.build_llama();
            } break;
        case LLM_ARCH_BAICHUAN:
            {
                result = llm.build_baichuan();
            } break;
        case LLM_ARCH_FALCON:
            {
                result = llm.build_falcon();
            } break;
        case LLM_ARCH_STARCODER:
            {
                result = llm.build_starcoder();
            } break;
        case LLM_ARCH_PERSIMMON:
            {
                result = llm.build_persimmon();
            } break;
        case LLM_ARCH_REFACT:
            {
                result = llm.build_refact();
            } break;
        case LLM_ARCH_BERT:
        case LLM_ARCH_NOMIC_BERT:
            {
                result = llm.build_bert();
            } break;
        case LLM_ARCH_BLOOM:
            {
                result = llm.build_bloom();
            } break;
        case LLM_ARCH_MPT:
            {
                result = llm.build_mpt();
            } break;
        case LLM_ARCH_STABLELM:
            {
                result = llm.build_stablelm();
            } break;
        case LLM_ARCH_QWEN:
            {
                result = llm.build_qwen();
            } break;
        case LLM_ARCH_QWEN2:
            {
                result = llm.build_qwen2();
            } break;
        case LLM_ARCH_PHI2:
            {
                result = llm.build_phi2();
            } break;
        case LLM_ARCH_PLAMO:
            {
                result = llm.build_plamo();
            } break;
        case LLM_ARCH_GPT2:
            {
                result = llm.build_gpt2();
            } break;
        case LLM_ARCH_CODESHELL:
            {
                result = llm.build_codeshell();
            } break;
        case LLM_ARCH_ORION:
            {
                result = llm.build_orion();
            } break;
        case LLM_ARCH_INTERNLM2:
            {
                result = llm.build_internlm2();
            } break;
        case LLM_ARCH_MINICPM:
            {
                result = llm.build_minicpm();
            } break;
        default:
            GGML_ASSERT(false);
    }

    llm.free();

    return result;
}
static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
    //
    // set input data
    //

    const auto & hparams = lctx.model.hparams;
    const auto & cparams = lctx.cparams;
    const auto & kv_self = lctx.kv_self;

    if (batch.token) {
        const int64_t n_tokens = batch.n_tokens;

        ggml_backend_tensor_set(lctx.inp_tokens, batch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens));
    }

    if (batch.embd) {
        const int64_t n_embd   = hparams.n_embd;
        const int64_t n_tokens = batch.n_tokens;

        ggml_backend_tensor_set(lctx.inp_embd, batch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
    }

    if (batch.pos) {
        const int64_t n_tokens = batch.n_tokens;

        ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
    }
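
    // build the attention mask: a KV cell is visible (0.0f) to a token only if the cell
    // belongs to the token's sequence and, with causal attention, does not lie ahead of the
    // token's position; all other cells are masked with -INFINITY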
    {
        const int64_t n_kv     = kv_self.n;
        const int64_t n_tokens = batch.n_tokens;

        assert(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));

        float * data = (float *) lctx.inp_KQ_mask->data;

        for (int h = 0; h < 1; ++h) {
            for (int j = 0; j < n_tokens; ++j) {
                const llama_pos    pos    = batch.pos[j];
                const llama_seq_id seq_id = batch.seq_id[j][0];

                for (int i = 0; i < n_kv; ++i) {
                    float f;
                    if (!lctx.kv_self.cells[i].has_seq_id(seq_id) ||
                        (hparams.causal_attn && lctx.kv_self.cells[i].pos > pos)) {
                        f = -INFINITY;
                    } else {
                        f = 0;
                    }
                    data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
                }
            }
        }
    }

    if (hparams.need_kq_pos) {
        const int64_t n_kv = kv_self.n;

        assert(ggml_backend_buffer_is_host(lctx.inp_KQ_pos->buffer));

        float * data = (float *) lctx.inp_KQ_pos->data;

        for (int i = 0; i < n_kv; ++i) {
            data[i] = float(lctx.kv_self.cells[i].pos);
        }
    }

    if (kv_self.has_shift) {
        const int64_t n_ctx = cparams.n_ctx;

        assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer));

        int32_t * data = (int32_t *) lctx.inp_K_shift->data;

        for (int i = 0; i < n_ctx; ++i) {
            data[i] = lctx.kv_self.cells[i].delta;
        }
    }

    if (cparams.do_pooling && hparams.pooling_type == LLAMA_POOLING_MEAN) {
        const int64_t n_tokens = batch.n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));

        float * data = (float *) lctx.inp_mean->data;
        memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));

        std::vector<uint64_t> sum(n_tokens, 0);
        for (int i = 0; i < n_tokens; ++i) {
            const llama_seq_id seq_id = batch.seq_id[i][0];
            sum[seq_id] += 1;
        }

        std::vector<float> div(n_tokens, 0.0f);
        for (int i = 0; i < n_tokens; ++i) {
            const uint64_t s = sum[i];
            if (s > 0) {
                div[i] = 1.0f/float(s);
            }
        }

        for (int i = 0; i < n_tokens; ++i) {
            const llama_seq_id seq_id = batch.seq_id[i][0];
            data[seq_id*n_tokens + i] = div[seq_id];
        }
    }

    if (cparams.do_pooling && hparams.pooling_type == LLAMA_POOLING_CLS) {
        const int64_t n_tokens = batch.n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));

        uint32_t * data = (uint32_t *) lctx.inp_cls->data;

        for (int i = 0; i < n_tokens; ++i) {
            const llama_seq_id seq_id = batch.seq_id[i][0];
            const llama_pos    pos    = batch.pos[i];
            if (pos == 0) {
                data[seq_id] = i;
            }
        }
    }
}
// decode a batch of tokens by evaluating the transformer
//
//   - lctx:  llama context
//   - batch: batch to evaluate
//
// return 0 on success
// return positive int on warning
// return negative int on error
//
static int llama_decode_internal(
        llama_context & lctx,
        llama_batch batch) {
    const uint32_t n_tokens = batch.n_tokens;

    if (n_tokens == 0) {
        LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
        return -1;
    }

    const auto & model   = lctx.model;
    const auto & hparams = model.hparams;
    const auto & cparams = lctx.cparams;

    const auto n_batch = cparams.n_batch;

    GGML_ASSERT(n_tokens <= n_batch);

    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;

    GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT

    const int64_t t_start_us = ggml_time_us();

#ifdef GGML_USE_MPI
    // TODO: needs fix after #3228
    GGML_ASSERT(false && "not implemented");
    //ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
#endif

    GGML_ASSERT(n_threads > 0);

    auto & kv_self = lctx.kv_self;

    const int64_t n_embd  = hparams.n_embd;
    const int64_t n_vocab = hparams.n_vocab;

    // helpers for smoother batch API transition
    // after deprecating the llama_eval calls, these will be removed
    std::vector<llama_pos> pos;

    std::vector<int32_t>                   n_seq_id;
    std::vector<llama_seq_id *>            seq_id_arr;
    std::vector<std::vector<llama_seq_id>> seq_id;

    if (batch.pos == nullptr) {
        pos.resize(n_tokens);
        for (uint32_t i = 0; i < n_tokens; i++) {
            pos[i] = batch.all_pos_0 + i*batch.all_pos_1;
        }

        batch.pos = pos.data();
    }

    if (batch.seq_id == nullptr) {
        n_seq_id.resize(n_tokens);
        seq_id.resize(n_tokens);
        seq_id_arr.resize(n_tokens);
        for (uint32_t i = 0; i < n_tokens; i++) {
            n_seq_id[i] = 1;
            seq_id[i].resize(1);
            seq_id[i][0] = batch.all_seq_id;
            seq_id_arr[i] = seq_id[i].data();
        }

        batch.n_seq_id = n_seq_id.data();
        batch.seq_id = seq_id_arr.data();
    }

    // if we have enough unused cells before the current head ->
    //   better to start searching from the beginning of the cache, hoping to fill it
    if (kv_self.head > kv_self.used + 2*n_tokens) {
        kv_self.head = 0;
    }

    if (!llama_kv_cache_find_slot(kv_self, batch)) {
        return 1;
    }

    // a heuristic, to avoid attending the full cache if it is not yet utilized
    // after enough generations, the benefit from this heuristic disappears
    // if we start defragmenting the cache, the benefit from this will be more important
    kv_self.n = std::min((int32_t) cparams.n_ctx, std::max(32, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)));
    //kv_self.n = llama_kv_cache_cell_max(kv_self);

    //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);

    ggml_backend_sched_reset(lctx.sched);
    ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);

    ggml_cgraph * gf = llama_build_graph(lctx, batch, false);

    // the output is always the last tensor in the graph
    struct ggml_tensor * res        = gf->nodes[gf->n_nodes - 1];
    struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];

    if (strcmp(res->name, "result_output") == 0) {
        // the embeddings could be the second to last tensor, or the third to last tensor
        if (strcmp(embeddings->name, "result_norm") != 0) {
            embeddings = gf->nodes[gf->n_nodes - 3];
            GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
        }
    } else if (strcmp(res->name, "result_embd") == 0) {
        embeddings = res;
        res = nullptr;
    } else {
        GGML_ASSERT(false);
    }

    // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);

    // for big prompts, if BLAS is enabled, it is better to use only one thread
    // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
    // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
    //       we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
    //       with the BLAS calls. need a better solution
    // MoE Special Case: This logic applies when hparams.n_expert == 0, i.e. the model is NOT an MoE model. When an MoE is
    //                   being processed then Accelerate/BLAS will not be involved, so capping would limit performance.
    if (n_tokens >= 32 && hparams.n_expert == 0 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
        n_threads = std::min(4, n_threads);
    }

#ifdef GGML_USE_MPI
    const int64_t n_layer = hparams.n_layer;
    ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
#endif

#ifdef GGML_USE_METAL
    if (ggml_backend_is_metal(lctx.backend_metal)) {
        ggml_backend_metal_set_n_cb(lctx.backend_metal, n_threads);
    }
#endif

    if (lctx.backend_cpu != nullptr) {
        ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads);
    }

    llama_set_inputs(lctx, batch);

    ggml_backend_sched_graph_compute(lctx.sched, gf);

    // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched));

#ifdef GGML_USE_MPI
    ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
#endif

    // update the kv ring buffer
    {
        if (kv_self.has_shift) {
            kv_self.has_shift = false;
            for (uint32_t i = 0; i < kv_self.size; ++i) {
                kv_self.cells[i].delta = 0;
            }
        }

        kv_self.head += n_tokens;

        // Ensure kv cache head points to a valid index.
        if (kv_self.head >= kv_self.size) {
            kv_self.head = 0;
        }
    }

#ifdef GGML_PERF
    // print timing information per ggml operation (for debugging purposes)
    // requires GGML_PERF to be defined
    ggml_graph_print(gf);
#endif

    // plot the computation graph in dot format (for debugging purposes)
    //if (n_past%100 == 0) {
    //    ggml_graph_dump_dot(gf, NULL, "llama.dot");
    //}

    // extract logits
    // TODO: do not compute and extract logits if only embeddings are needed
    //       need to update the graphs to skip "result_output"
    if (res) {
        auto & logits_out = lctx.logits;

#ifndef NDEBUG
        auto & logits_valid = lctx.logits_valid;
        logits_valid.clear();
        logits_valid.resize(n_tokens);

        logits_out.clear();
#endif

        ggml_backend_t res_backend = ggml_backend_sched_get_node_backend(lctx.sched, res);
        GGML_ASSERT(res_backend != nullptr);
        if (batch.logits) {
            logits_out.resize(n_vocab * n_tokens);
            for (uint32_t i = 0; i < n_tokens; i++) {
                if (batch.logits[i] == 0) {
                    continue;
                }
                ggml_backend_tensor_get_async(res_backend, res, logits_out.data() + (n_vocab*i), (n_vocab*i)*sizeof(float), n_vocab*sizeof(float));
#ifndef NDEBUG
                logits_valid[i] = true;
#endif
            }
        } else if (lctx.logits_all) {
            logits_out.resize(n_vocab * n_tokens);
            ggml_backend_tensor_get_async(res_backend, res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float));
#ifndef NDEBUG
            std::fill(logits_valid.begin(), logits_valid.end(), true);
#endif
        } else {
            logits_out.resize(n_vocab);
            ggml_backend_tensor_get_async(res_backend, res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float));
#ifndef NDEBUG
            logits_valid[0] = true;
#endif
        }
        ggml_backend_synchronize(res_backend);
    }

    // extract embeddings
    if (!lctx.embedding.empty()) {
        auto & embedding_out = lctx.embedding;

        const int64_t embd_pos  = res ? n_embd * (n_tokens-1) : 0;
        const int64_t embd_size = res ? n_embd : n_embd * n_tokens;

        embedding_out.resize(embd_size);
        ggml_backend_t embeddings_backend = ggml_backend_sched_get_node_backend(lctx.sched, embeddings);
        ggml_backend_tensor_get_async(embeddings_backend, embeddings, embedding_out.data(), embd_pos*sizeof(float), embd_size*sizeof(float));
        ggml_backend_synchronize(embeddings_backend);
    }

    // measure the performance only for the single-token evals
    if (n_tokens == 1) {
        lctx.t_eval_us += ggml_time_us() - t_start_us;
        lctx.n_eval++;
    }
    else if (n_tokens > 1) {
        lctx.t_p_eval_us += ggml_time_us() - t_start_us;
        lctx.n_p_eval += n_tokens;
    }

    // get a more accurate load time, upon first eval
    // TODO: fix this
    if (!lctx.has_evaluated_once) {
        lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
        lctx.has_evaluated_once = true;
    }

    return 0;
}
  6527. //
  6528. // tokenizer
  6529. //
  6530. static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
  6531. return vocab.type;
  6532. }
  6533. static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
  6534. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL;
  6535. }
  6536. static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
  6537. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNKNOWN;
  6538. }
  6539. static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
  6540. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
  6541. }
  6542. static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
  6543. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
  6544. }
  6545. static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id) {
  6546. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
  6547. }
  6548. static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
  6549. GGML_ASSERT(llama_is_byte_token(vocab, id));
  6550. const auto& token_data = vocab.id_to_token.at(id);
  6551. switch (llama_vocab_get_type(vocab)) {
  6552. case LLAMA_VOCAB_TYPE_SPM: {
  6553. auto buf = token_data.text.substr(3, 2);
  6554. return strtol(buf.c_str(), NULL, 16);
  6555. }
  6556. case LLAMA_VOCAB_TYPE_BPE: {
  6557. GGML_ASSERT(false);
  6558. return unicode_to_bytes_bpe(token_data.text);
  6559. }
  6560. case LLAMA_VOCAB_TYPE_WPM: {
  6561. GGML_ASSERT(false);
  6562. }
  6563. default:
  6564. GGML_ASSERT(false);
  6565. }
  6566. }
  6567. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
  6568. static const char * hex = "0123456789ABCDEF";
  6569. switch (llama_vocab_get_type(vocab)) {
  6570. case LLAMA_VOCAB_TYPE_SPM: {
  6571. const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
  6572. auto token = vocab.token_to_id.find(buf);
  6573. if (token != vocab.token_to_id.end()) {
  6574. return (*token).second;
  6575. }
  6576. // Try to fall back to just the byte as a string
  6577. const char buf2[2] = { (char)ch, 0 };
  6578. return vocab.token_to_id.at(buf2);
  6579. }
  6580. case LLAMA_VOCAB_TYPE_WPM:
  6581. case LLAMA_VOCAB_TYPE_BPE: {
  6582. return vocab.token_to_id.at(bytes_to_unicode_bpe(ch));
  6583. }
  6584. default:
  6585. GGML_ASSERT(false);
  6586. }
  6587. }
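// For example, with an SPM vocab, llama_byte_to_token() above looks the byte
// 0x41 ('A') up as the piece "<0x41>" (falling back to the literal one-byte
// string "A"), and llama_token_to_byte() inverts this by parsing the two hex
// digits at positions 3..4 of the piece text.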
  6588. static void llama_escape_whitespace(std::string & text) {
  6589. replace_all(text, " ", "\xe2\x96\x81");
  6590. }
  6591. static void llama_unescape_whitespace(std::string & word) {
  6592. replace_all(word, "\xe2\x96\x81", " ");
  6593. }
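// Example round-trip:
//   std::string s = "Hello world";
//   llama_escape_whitespace(s);   // "Hello\xe2\x96\x81world" (U+2581 LOWER ONE EIGHTH BLOCK)
//   llama_unescape_whitespace(s); // "Hello world" again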
  6594. struct llm_symbol {
  6595. using index = int;
  6596. index prev;
  6597. index next;
  6598. const char * text;
  6599. size_t n;
  6600. };
  6601. static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
  6602. // SPM tokenizer
  6603. // original implementation:
  6604. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  6605. struct llm_bigram_spm {
  6606. struct comparator {
  6607. bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
  6608. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  6609. }
  6610. };
  6611. using queue_storage = std::vector<llm_bigram_spm>;
  6612. using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
  6613. llm_symbol::index left;
  6614. llm_symbol::index right;
  6615. float score;
  6616. size_t size;
  6617. };
  6618. struct llm_tokenizer_spm {
  6619. llm_tokenizer_spm(const llama_vocab & vocab) : vocab(vocab) {}
  6620. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  6621. // split string into utf8 chars
  6622. int index = 0;
  6623. size_t offs = 0;
  6624. while (offs < text.size()) {
  6625. llm_symbol sym;
  6626. size_t len = utf8_len(text[offs]);
  6627. sym.text = text.c_str() + offs;
  6628. sym.n = std::min(len, text.size() - offs);
  6629. offs += sym.n;
  6630. sym.prev = index - 1;
  6631. sym.next = offs == text.size() ? -1 : index + 1;
  6632. index++;
  6633. symbols.emplace_back(sym);
  6634. }
  6635. // seed the work queue with all possible 2-character tokens.
  6636. for (size_t i = 1; i < symbols.size(); ++i) {
  6637. try_add_bigram(i - 1, i);
  6638. }
6639. // keep substituting the highest-scoring pairs for as long as we can.
  6640. while (!work_queue.empty()) {
  6641. auto bigram = work_queue.top();
  6642. work_queue.pop();
  6643. auto & left_sym = symbols[bigram.left];
  6644. auto & right_sym = symbols[bigram.right];
  6645. // if one of the symbols already got merged, skip it.
  6646. if (left_sym.n == 0 || right_sym.n == 0 ||
  6647. left_sym.n + right_sym.n != bigram.size) {
  6648. continue;
  6649. }
  6650. // merge the right sym into the left one
  6651. left_sym.n += right_sym.n;
  6652. right_sym.n = 0;
  6653. //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  6654. // remove the right sym from the chain
  6655. left_sym.next = right_sym.next;
  6656. if (right_sym.next >= 0) {
  6657. symbols[right_sym.next].prev = bigram.left;
  6658. }
  6659. // find more substitutions
  6660. try_add_bigram(left_sym.prev, bigram.left);
  6661. try_add_bigram(bigram.left, left_sym.next);
  6662. }
  6663. for (int i = 0; i != -1; i = symbols[i].next) {
  6664. auto & symbol = symbols[i];
  6665. resegment(symbol, output);
  6666. }
  6667. }
  6668. private:
  6669. void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
  6670. auto text = std::string(symbol.text, symbol.n);
  6671. auto token = vocab.token_to_id.find(text);
  6672. // Do we need to support is_unused?
  6673. if (token != vocab.token_to_id.end()) {
  6674. output.push_back((*token).second);
  6675. return;
  6676. }
  6677. const auto p = rev_merge.find(text);
  6678. if (p == rev_merge.end()) {
  6679. // output any symbols that did not form tokens as bytes.
  6680. output.reserve(output.size() + symbol.n);
  6681. for (int j = 0; j < (int)symbol.n; ++j) {
  6682. llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
  6683. output.push_back(token_id);
  6684. }
  6685. return;
  6686. }
  6687. resegment(symbols[p->second.first], output);
  6688. resegment(symbols[p->second.second], output);
  6689. }
  6690. void try_add_bigram(int left, int right) {
  6691. if (left == -1 || right == -1) {
  6692. return;
  6693. }
  6694. const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
  6695. auto token = vocab.token_to_id.find(text);
  6696. if (token == vocab.token_to_id.end()) {
  6697. return;
  6698. }
  6699. if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
  6700. return;
  6701. }
  6702. const auto & tok_data = vocab.id_to_token[(*token).second];
  6703. llm_bigram_spm bigram;
  6704. bigram.left = left;
  6705. bigram.right = right;
  6706. bigram.score = tok_data.score;
  6707. bigram.size = text.size();
  6708. work_queue.push(bigram);
  6709. // Do we need to support is_unused?
  6710. rev_merge[text] = std::make_pair(left, right);
  6711. }
  6712. const llama_vocab & vocab;
  6713. std::vector<llm_symbol> symbols;
  6714. llm_bigram_spm::queue work_queue;
  6715. std::map<std::string, std::pair<int, int>> rev_merge;
  6716. };
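// Minimal usage sketch for the SPM tokenizer above (illustrative only; the
// real entry point is llama_tokenize_internal() further below, which also
// handles the optional leading-space prefix and special-token splitting).
static std::vector<llama_vocab::id> llm_tokenizer_spm_example(const llama_vocab & vocab, std::string text) {
    std::vector<llama_vocab::id> out;
    llama_escape_whitespace(text); // " " -> U+2581, as the SentencePiece model expects
    llm_tokenizer_spm tokenizer(vocab);
    tokenizer.tokenize(text, out);
    return out;
}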
  6717. // BPE tokenizer
  6718. // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
  6719. // tried to simplify unicode stuff, so most likely does not work 100% correctly!
  6720. // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
  6721. struct llm_bigram_bpe {
  6722. struct comparator {
  6723. bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
  6724. return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
  6725. }
  6726. };
  6727. using queue_storage = std::vector<llm_bigram_bpe>;
  6728. using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
  6729. llm_symbol::index left;
  6730. llm_symbol::index right;
  6731. std::string text;
  6732. int rank;
  6733. size_t size;
  6734. };
  6735. struct llm_tokenizer_bpe {
  6736. llm_tokenizer_bpe(const llama_vocab & vocab): vocab(vocab) {}
  6737. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  6738. int final_prev_index = -1;
  6739. auto word_collection = bpe_gpt2_preprocess(text);
  6740. symbols_final.clear();
  6741. for (auto & word : word_collection) {
  6742. work_queue = llm_bigram_bpe::queue();
  6743. symbols.clear();
  6744. int index = 0;
  6745. size_t offset = 0;
  6746. while (offset < word.size()) {
  6747. llm_symbol sym;
  6748. size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
  6749. sym.text = word.c_str() + offset;
  6750. sym.n = char_len;
  6751. offset += sym.n;
  6752. sym.prev = index - 1;
  6753. sym.next = offset == word.size() ? -1 : index + 1;
  6754. index++;
  6755. symbols.emplace_back(sym);
  6756. }
  6757. for (size_t i = 1; i < symbols.size(); ++i) {
  6758. add_new_bigram(i - 1, i);
  6759. }
  6760. // build token(s)
  6761. while (!work_queue.empty()) {
  6762. auto bigram = work_queue.top();
  6763. work_queue.pop();
  6764. auto & left_symbol = symbols[bigram.left];
  6765. auto & right_symbol = symbols[bigram.right];
  6766. if (left_symbol.n == 0 || right_symbol.n == 0) {
  6767. continue;
  6768. }
  6769. std::string left_token = std::string(left_symbol.text, left_symbol.n);
  6770. std::string right_token = std::string(right_symbol.text, right_symbol.n);
  6771. if (left_token + right_token != bigram.text) {
  6772. continue; // Skip this bigram if it's outdated
  6773. }
  6774. // merge the right sym into the left one
  6775. left_symbol.n += right_symbol.n;
  6776. right_symbol.n = 0;
  6777. // remove the right sym from the chain
  6778. left_symbol.next = right_symbol.next;
  6779. if (right_symbol.next >= 0) {
  6780. symbols[right_symbol.next].prev = bigram.left;
  6781. }
  6782. add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
  6783. add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
  6784. }
6785. // add the finished tokens to the final list keeping correct order for next and prev
  6786. for (auto & sym : symbols) {
  6787. if (sym.n > 0) {
  6788. sym.prev = final_prev_index;
  6789. sym.next = -1;
  6790. if (final_prev_index != -1) {
  6791. symbols_final[final_prev_index].next = symbols_final.size();
  6792. }
  6793. symbols_final.emplace_back(sym);
  6794. final_prev_index = symbols_final.size() - 1;
  6795. }
  6796. }
  6797. }
  6798. symbols = symbols_final;
  6799. if (!symbols.empty()) {
  6800. for (int i = 0; i != -1; i = symbols[i].next) {
  6801. auto & symbol = symbols[i];
  6802. if (symbol.n == 0) {
  6803. continue;
  6804. }
  6805. const std::string str = std::string(symbol.text, symbol.n);
  6806. const auto token = vocab.token_to_id.find(str);
  6807. if (token == vocab.token_to_id.end()) {
  6808. for (auto j = str.begin(); j != str.end(); ++j) {
  6809. std::string byte_str(1, *j);
  6810. auto token_multibyte = vocab.token_to_id.find(byte_str);
  6811. if (token_multibyte == vocab.token_to_id.end()) {
  6812. throw std::runtime_error("ERROR: byte not found in vocab");
  6813. }
  6814. output.push_back((*token_multibyte).second);
  6815. }
  6816. } else {
  6817. output.push_back((*token).second);
  6818. }
  6819. }
  6820. }
  6821. }
  6822. private:
  6823. void add_new_bigram(int left, int right) {
  6824. if (left == -1 || right == -1) {
  6825. return;
  6826. }
  6827. std::string left_token = std::string(symbols[left].text, symbols[left].n);
  6828. std::string right_token = std::string(symbols[right].text, symbols[right].n);
6829. // find_bpe_rank returns -1 when the pair is not present in the merges list
6830. const int rank_found = vocab.find_bpe_rank(left_token, right_token);
  6831. if (rank_found < 0) {
  6832. return;
  6833. }
  6834. llm_bigram_bpe bigram;
  6835. bigram.left = left;
  6836. bigram.right = right;
  6837. bigram.text = left_token + right_token;
  6838. bigram.size = left_token.size() + right_token.size();
  6839. bigram.rank = rank_found;
  6840. work_queue.push(bigram);
  6841. }
  6842. std::vector<std::string> bpe_gpt2_preprocess(const std::string & text) {
  6843. std::vector<std::string> bpe_words;
  6844. std::vector<std::string> bpe_encoded_words;
  6845. std::string token = "";
  6846. // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+
  6847. bool collecting_numeric = false;
  6848. bool collecting_letter = false;
  6849. bool collecting_special = false;
  6850. bool collecting_whitespace_lookahead = false;
  6851. bool collecting = false;
  6852. std::vector<std::string> text_utf;
  6853. text_utf.reserve(text.size());
  6854. bpe_words.reserve(text.size());
  6855. bpe_encoded_words.reserve(text.size());
  6856. auto cps = codepoints_from_utf8(text);
  6857. for (size_t i = 0; i < cps.size(); ++i)
  6858. text_utf.emplace_back(codepoint_to_utf8(cps[i]));
  6859. for (int i = 0; i < (int)text_utf.size(); i++) {
  6860. const std::string & utf_char = text_utf[i];
  6861. bool split_condition = false;
  6862. int bytes_remain = text_utf.size() - i;
  6863. // forward backward lookups
  6864. const std::string & utf_char_next = (i + 1 < (int)text_utf.size()) ? text_utf[i + 1] : "";
  6865. const std::string & utf_char_next_next = (i + 2 < (int)text_utf.size()) ? text_utf[i + 2] : "";
  6866. // handling contractions
  6867. if (!split_condition && bytes_remain >= 2) {
  6868. // 's|'t|'m|'d
  6869. if (utf_char == "\'" && (utf_char_next == "s" || utf_char_next == "t" || utf_char_next == "m" || utf_char_next == "d")) {
  6870. split_condition = true;
  6871. }
  6872. if (split_condition) {
  6873. if (token.size()) {
  6874. bpe_words.emplace_back(token); // push previous content as token
  6875. }
  6876. token = utf_char + utf_char_next;
  6877. bpe_words.emplace_back(token);
  6878. token = "";
  6879. i++;
  6880. continue;
  6881. }
  6882. }
  6883. if (!split_condition && bytes_remain >= 3) {
  6884. // 're|'ve|'ll
  6885. if (utf_char == "\'" && (
  6886. (utf_char_next == "r" && utf_char_next_next == "e") ||
  6887. (utf_char_next == "v" && utf_char_next_next == "e") ||
  6888. (utf_char_next == "l" && utf_char_next_next == "l"))
  6889. ) {
  6890. split_condition = true;
  6891. }
  6892. if (split_condition) {
6893. // the full contraction is available here; emit it as a single token
  6894. if (token.size()) {
  6895. bpe_words.emplace_back(token); // push previous content as token
  6896. }
  6897. token = utf_char + utf_char_next + utf_char_next_next;
  6898. bpe_words.emplace_back(token); // the contraction
  6899. token = "";
  6900. i += 2;
  6901. continue;
  6902. }
  6903. }
  6904. if (!split_condition && !collecting) {
  6905. if (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER)) {
  6906. collecting_letter = true;
  6907. collecting = true;
  6908. }
  6909. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  6910. collecting_numeric = true;
  6911. collecting = true;
  6912. }
  6913. else if (
  6914. ((codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) && (codepoint_type(utf_char) != CODEPOINT_TYPE_WHITESPACE)) ||
  6915. (!token.size() && utf_char == " " && codepoint_type(utf_char_next) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char_next) != CODEPOINT_TYPE_DIGIT && codepoint_type(utf_char_next) != CODEPOINT_TYPE_WHITESPACE)
  6916. ) {
  6917. collecting_special = true;
  6918. collecting = true;
  6919. }
  6920. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE && codepoint_type(utf_char_next) == CODEPOINT_TYPE_WHITESPACE) {
  6921. collecting_whitespace_lookahead = true;
  6922. collecting = true;
  6923. }
  6924. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE) {
  6925. split_condition = true;
  6926. }
  6927. }
  6928. else if (!split_condition && collecting) {
  6929. if (collecting_letter && codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER) {
  6930. split_condition = true;
  6931. }
  6932. else if (collecting_numeric && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) {
  6933. split_condition = true;
  6934. }
  6935. else if (collecting_special && (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE)) {
  6936. split_condition = true;
  6937. }
  6938. else if (collecting_whitespace_lookahead && (codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  6939. split_condition = true;
  6940. }
  6941. }
  6942. if (utf_char_next == "") {
  6943. split_condition = true; // final
  6944. token += utf_char;
  6945. }
  6946. if (split_condition) {
  6947. if (token.size()) {
  6948. bpe_words.emplace_back(token);
  6949. }
  6950. token = utf_char;
  6951. collecting = false;
  6952. collecting_letter = false;
  6953. collecting_numeric = false;
  6954. collecting_special = false;
  6955. collecting_whitespace_lookahead = false;
  6956. }
  6957. else {
  6958. token += utf_char;
  6959. }
  6960. }
  6961. for (std::string & word : bpe_words) {
  6962. std::string encoded_token = "";
  6963. for (char & c : word) {
  6964. encoded_token += bytes_to_unicode_bpe(c);
  6965. }
  6966. bpe_encoded_words.emplace_back(encoded_token);
  6967. }
  6968. return bpe_encoded_words;
  6969. }
  6970. const llama_vocab & vocab;
  6971. std::vector<llm_symbol> symbols;
  6972. std::vector<llm_symbol> symbols_final;
  6973. llm_bigram_bpe::queue work_queue;
  6974. };
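// For example, bpe_gpt2_preprocess("Hello world!") first splits the text into
// the words ["Hello", " world", "!"] following the GPT-2 regex quoted inside
// it, then byte-encodes each word with bytes_to_unicode_bpe(), so the leading
// space of " world" becomes U+0120 ("Gworld" with the dotted G) before the
// merge loop above runs on it.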
  6975. struct llm_tokenizer_wpm {
  6976. llm_tokenizer_wpm(const llama_vocab & vocab): vocab(vocab) {}
  6977. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  6978. auto * token_map = &vocab.token_to_id;
  6979. // normalize and split by whitespace
  6980. std::vector<std::string> words = preprocess(text);
  6981. // bos token prepended already
  6982. // find the longest tokens that form the words
  6983. for (const std::string &word : words) {
  6984. // skip empty words
  6985. if (word.size() == 0) {
  6986. continue;
  6987. }
  6988. // prepend phantom space
  6989. std::string word1 = "\xe2\x96\x81" + word;
  6990. int n = word1.size();
  6991. // we're at the start of a new word
  6992. int i = 0;
  6993. bool match_any = false;
  6994. // move through character position in word
  6995. while (i < n) {
  6996. // loop through possible match length
  6997. bool match = false;
  6998. for (int j = n; j > i; j--) {
  6999. auto it = token_map->find(word1.substr(i, j - i));
  7000. if (it != token_map->end()) {
  7001. output.push_back(it->second);
  7002. match = true;
  7003. match_any = true;
  7004. i = j;
  7005. break;
  7006. }
  7007. }
  7008. // must be an unknown character
  7009. if (!match) {
  7010. i++;
  7011. }
  7012. }
  7013. // we didn't find any matches for this word
  7014. if (!match_any) {
  7015. output.push_back(vocab.special_unk_id);
  7016. }
  7017. }
  7018. // append eos token
  7019. output.push_back(vocab.special_eos_id);
  7020. }
  7021. std::vector<std::string> preprocess(const std::string & text) {
  7022. std::string ori_str = normalize(text);
  7023. uint64_t ori_size = ori_str.size();
  7024. // single punct / single symbol / single digit
7025. // baseline: add whitespace on the left and right of punct and Chinese characters
  7026. std::vector<std::string> words;
  7027. std::string new_str = "";
  7028. uint64_t i = 0;
  7029. while (i < ori_size) {
  7030. int utf_char_len = utf8_len(ori_str[i]);
  7031. if ((utf_char_len == 1) && ispunct(ori_str[i])) {
  7032. new_str += " ";
  7033. new_str += ori_str[i];
  7034. new_str += " ";
  7035. i += 1;
  7036. }
  7037. else if ((utf_char_len == 3) && is_chinese_char(ori_str.substr(i, 3))) {
  7038. new_str += " ";
  7039. new_str += ori_str.substr(i, 3);
  7040. new_str += " ";
  7041. i += 3;
  7042. }
  7043. else {
  7044. new_str += ori_str[i];
  7045. i += 1;
  7046. }
  7047. }
  7048. // split by whitespace
  7049. uint64_t l = 0;
  7050. uint64_t r = 0;
  7051. while (r < new_str.size()) {
  7052. // if is whitespace
  7053. if (isspace(new_str[r])) {
  7054. if (r > l) words.push_back(new_str.substr(l, (r - l)));
  7055. l = r + 1;
  7056. r = l;
  7057. }
  7058. else {
  7059. r += 1;
  7060. }
  7061. }
  7062. if (r > l) {
  7063. words.push_back(new_str.substr(l, (r - l)));
  7064. }
  7065. return words;
  7066. }
  7067. std::string normalize(const std::string & text) {
  7068. // TODO: handle chinese characters? https://github.com/huggingface/tokenizers/blob/ef5f50605ddf9f8caef1598c0e4853862b9707a7/tokenizers/src/normalizers/bert.rs#L98
  7069. std::string text2 = strip_accents(text);
  7070. for (size_t i = 0; i < text2.size(); i += utf8_len(text2[i])) {
  7071. char c = text2[i];
  7072. if (c >= 'A' && c <= 'Z') {
  7073. text2[i] = c - 'A' + 'a';
  7074. }
  7075. }
  7076. return text2;
  7077. }
  7078. bool is_chinese_char(const std::string & str) {
  7079. int len = str.length();
  7080. unsigned int codepoint = 0;
  7081. int num_bytes = 0;
  7082. int i = 0;
  7083. unsigned char ch = static_cast<unsigned char>(str[i]);
  7084. if (ch <= 0x7f) {
  7085. codepoint = ch;
  7086. num_bytes = 1;
  7087. } else if ((ch >> 5) == 0x06) {
  7088. codepoint = ch & 0x1f;
  7089. num_bytes = 2;
  7090. } else if ((ch >> 4) == 0x0e) {
  7091. codepoint = ch & 0x0f;
  7092. num_bytes = 3;
  7093. } else if ((ch >> 3) == 0x1e) {
  7094. codepoint = ch & 0x07;
  7095. num_bytes = 4;
  7096. }
  7097. for (int j = 1; j < num_bytes; ++j) {
  7098. if (i + j >= len) {
  7099. return false; // incomplete UTF-8 character
  7100. }
  7101. unsigned char next_ch = static_cast<unsigned char>(str[i + j]);
  7102. if ((next_ch >> 6) != 0x02) {
  7103. return false; // invalid trailing byte
  7104. }
  7105. codepoint = (codepoint << 6) | (next_ch & 0x3f);
  7106. }
  7107. if ((codepoint >= 0x4E00 && codepoint <= 0x9FFF) ||
  7108. (codepoint >= 0x3400 && codepoint <= 0x4DBF) ||
  7109. (codepoint >= 0x20000 && codepoint <= 0x2A6DF) ||
  7110. (codepoint >= 0x2A700 && codepoint <= 0x2B73F) ||
  7111. (codepoint >= 0x2B740 && codepoint <= 0x2B81F) ||
  7112. (codepoint >= 0x2B920 && codepoint <= 0x2CEAF) || // this should be 0x2B820 but in hf rust code it is 0x2B920
  7113. (codepoint >= 0xF900 && codepoint <= 0xFAFF) ||
  7114. (codepoint >= 0x2F800 && codepoint <= 0x2FA1F) ||
  7115. (codepoint >= 0x3000 && codepoint <= 0x303F) ||
  7116. (codepoint >= 0xFF00 && codepoint <= 0xFFEF)) {
  7117. return true; // NOLINT
  7118. }
  7119. return false;
  7120. }
  7121. std::string strip_accents(const std::string & input_string) {
  7122. std::string resultString;
  7123. std::map<std::string, char> accent_map = {
  7124. {"À", 'A'}, {"Á", 'A'}, {"Â", 'A'}, {"Ã", 'A'}, {"Ä", 'A'}, {"Å", 'A'},
  7125. {"à", 'a'}, {"á", 'a'}, {"â", 'a'}, {"ã", 'a'}, {"ä", 'a'}, {"å", 'a'},
  7126. {"È", 'E'}, {"É", 'E'}, {"Ê", 'E'}, {"Ë", 'E'}, {"è", 'e'}, {"é", 'e'},
  7127. {"ê", 'e'}, {"ë", 'e'}, {"Ì", 'I'}, {"Í", 'I'}, {"Î", 'I'}, {"Ï", 'I'},
  7128. {"ì", 'i'}, {"í", 'i'}, {"î", 'i'}, {"ï", 'i'}, {"Ò", 'O'}, {"Ó", 'O'},
  7129. {"Ô", 'O'}, {"Õ", 'O'}, {"Ö", 'O'}, {"ò", 'o'}, {"ó", 'o'}, {"ô", 'o'},
  7130. {"õ", 'o'}, {"ö", 'o'}, {"Ù", 'U'}, {"Ú", 'U'}, {"Û", 'U'}, {"Ü", 'U'},
  7131. {"ù", 'u'}, {"ú", 'u'}, {"û", 'u'}, {"ü", 'u'}, {"Ý", 'Y'}, {"ý", 'y'},
  7132. {"Ç", 'C'}, {"ç", 'c'}, {"Ñ", 'N'}, {"ñ", 'n'},
  7133. };
  7134. for (size_t i = 0; i < input_string.length();) {
  7135. int len = utf8_len(input_string[i]);
  7136. std::string curChar = input_string.substr(i, len);
  7137. auto iter = accent_map.find(curChar);
  7138. if (iter != accent_map.end()) {
  7139. resultString += iter->second;
  7140. } else {
  7141. resultString += curChar;
  7142. }
  7143. i += len;
  7144. }
  7145. return resultString;
  7146. }
  7147. static size_t utf8_len(char src) {
  7148. const size_t lookup[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4};
  7149. uint8_t highbits = static_cast<uint8_t>(src) >> 4;
  7150. return lookup[highbits];
  7151. }
  7152. const llama_vocab & vocab;
  7153. };
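// For example, llm_tokenizer_wpm::preprocess("H\xc3\xa9llo, World!") first
// normalizes to "hello, world!" (accents stripped, ASCII lowercased), pads the
// single-byte punctuation with spaces, and returns ["hello", ",", "world", "!"];
// tokenize() then matches each word greedily after prepending the phantom
// U+2581 prefix.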
  7154. typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
  7155. FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
  7156. FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
  7157. } FRAGMENT_BUFFER_VARIANT_TYPE;
  7158. struct fragment_buffer_variant {
  7159. fragment_buffer_variant(llama_vocab::id _token)
  7160. :
  7161. type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
  7162. token(_token),
  7163. raw_text(_dummy),
  7164. offset(0),
  7165. length(0) {}
  7166. fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
  7167. :
  7168. type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
  7169. token((llama_vocab::id) - 1),
  7170. raw_text(_raw_text),
  7171. offset(_offset),
  7172. length(_length){
  7173. GGML_ASSERT(_offset >= 0);
  7174. GGML_ASSERT(_length >= 1);
  7175. GGML_ASSERT(offset + length <= raw_text.length());
  7176. }
  7177. const FRAGMENT_BUFFER_VARIANT_TYPE type;
  7178. const llama_vocab::id token;
  7179. const std::string _dummy;
  7180. const std::string & raw_text;
  7181. const uint64_t offset;
  7182. const uint64_t length;
  7183. };
  7184. // #define PRETOKENIZERDEBUG
  7185. static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
  7186. // for each special token
  7187. for (const auto & st: vocab.special_tokens_cache) {
  7188. const auto & special_token = st.first;
  7189. const auto & special_id = st.second;
  7190. // for each text fragment
  7191. std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
  7192. while (it != buffer.end()) {
  7193. auto & fragment = (*it);
  7194. // if a fragment is text ( not yet processed )
  7195. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  7196. auto * raw_text = &(fragment.raw_text);
  7197. auto raw_text_base_offset = fragment.offset;
  7198. auto raw_text_base_length = fragment.length;
  7199. // loop over the text
  7200. while (true) {
  7201. // find the first occurrence of a given special token in this fragment
7202. // passing the offset argument only limits the "search area" but match coordinates
7203. // are still relative to the source full raw_text
  7204. auto match = raw_text->find(special_token, raw_text_base_offset);
  7205. // no occurrences found, stop processing this fragment for a given special token
  7206. if (match == std::string::npos) break;
  7207. // check if match is within bounds of offset <-> length
  7208. if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
  7209. #ifdef PRETOKENIZERDEBUG
  7210. LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  7211. #endif
  7212. auto source = std::distance(buffer.begin(), it);
  7213. // if match is further than base offset
  7214. // then we have some text to the left of it
  7215. if (match > raw_text_base_offset) {
  7216. // left
7217. const int64_t left_remainder_offset = raw_text_base_offset + 0;
7218. const int64_t left_remainder_length = match - raw_text_base_offset;
7219. buffer.emplace_after(it, (*raw_text), left_remainder_offset, left_remainder_length);
7220. #ifdef PRETOKENIZERDEBUG
7221. LLAMA_LOG_WARN("FL: (%ld %ld) '%s'\n", left_remainder_offset, left_remainder_length, raw_text->substr(left_remainder_offset, left_remainder_length).c_str());
  7222. #endif
  7223. it++;
  7224. }
  7225. // special token
  7226. buffer.emplace_after(it, special_id);
  7227. it++;
  7228. // right
  7229. if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
7230. const int64_t right_remainder_offset = match + special_token.length();
7231. const int64_t right_remainder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
7232. buffer.emplace_after(it, (*raw_text), right_remainder_offset, right_remainder_length);
7233. #ifdef PRETOKENIZERDEBUG
7234. LLAMA_LOG_WARN("FR: (%ld %ld) '%s'\n", right_remainder_offset, right_remainder_length, raw_text->substr(right_remainder_offset, right_remainder_length).c_str());
  7235. #endif
  7236. it++;
  7237. if (source == 0) {
  7238. buffer.erase_after(buffer.before_begin());
  7239. } else {
  7240. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  7241. }
  7242. // repeat for the right side
7243. raw_text_base_offset = right_remainder_offset;
7244. raw_text_base_length = right_remainder_length;
  7245. #ifdef PRETOKENIZERDEBUG
  7246. LLAMA_LOG_WARN("RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  7247. #endif
  7248. } else {
  7249. if (source == 0) {
  7250. buffer.erase_after(buffer.before_begin());
  7251. } else {
  7252. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  7253. }
  7254. break;
  7255. }
  7256. }
  7257. }
  7258. it++;
  7259. }
  7260. }
  7261. }
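// For example, with a special token "<s>" -> id 1, an initial buffer holding
// the single fragment RAW_TEXT("foo<s>bar", 0, 9) is partitioned by
// tokenizer_st_partition() above into three fragments:
//   RAW_TEXT("foo<s>bar", 0, 3)  -- "foo"
//   TOKEN(1)                     -- the special token itself
//   RAW_TEXT("foo<s>bar", 6, 3)  -- "bar"
// so that only the raw-text pieces are later fed to the tokenizer.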
  7262. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special) {
  7263. std::vector<llama_vocab::id> output;
  7264. // OG tokenizer behavior:
  7265. //
  7266. // tokenizer.encode('', add_bos=True) returns [1]
  7267. // tokenizer.encode('', add_bos=False) returns []
  7268. if (bos && vocab.special_bos_id != -1) {
  7269. output.push_back(vocab.special_bos_id);
  7270. }
  7271. if (raw_text.empty()) {
  7272. return output;
  7273. }
  7274. std::forward_list<fragment_buffer_variant> fragment_buffer;
  7275. fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
  7276. if (special) tokenizer_st_partition(vocab, fragment_buffer);
  7277. switch (vocab.type) {
  7278. case LLAMA_VOCAB_TYPE_SPM:
  7279. {
  7280. for (const auto & fragment : fragment_buffer) {
  7281. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  7282. // without adding this leading whitespace, we do not get the same results as the original tokenizer
  7283. // TODO: It's likely possible to get rid of this string copy entirely
  7284. // by modifying llm_tokenizer_x to operate with string offsets like pre-tokenizer
  7285. // and passing 'add space prefix' as bool argument
  7286. //
  7287. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  7288. if (&fragment == &fragment_buffer.front()) {
  7289. if (vocab.add_space_prefix) {
  7290. raw_text = " " + raw_text; // prefix with space if the first token is not special
  7291. }
  7292. }
  7293. #ifdef PRETOKENIZERDEBUG
  7294. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  7295. #endif
  7296. llm_tokenizer_spm tokenizer(vocab);
  7297. llama_escape_whitespace(raw_text);
  7298. tokenizer.tokenize(raw_text, output);
  7299. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  7300. output.push_back(fragment.token);
  7301. }
  7302. }
  7303. } break;
  7304. case LLAMA_VOCAB_TYPE_BPE:
  7305. {
  7306. for (const auto & fragment : fragment_buffer) {
  7307. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  7308. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  7309. #ifdef PRETOKENIZERDEBUG
  7310. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  7311. #endif
  7312. llm_tokenizer_bpe tokenizer(vocab);
  7313. tokenizer.tokenize(raw_text, output);
  7314. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  7315. output.push_back(fragment.token);
  7316. }
  7317. }
  7318. } break;
  7319. case LLAMA_VOCAB_TYPE_WPM:
  7320. {
  7321. for (const auto & fragment : fragment_buffer) {
  7322. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  7323. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  7324. #ifdef PRETOKENIZERDEBUG
  7325. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  7326. #endif
  7327. llm_tokenizer_wpm tokenizer(vocab);
  7328. tokenizer.tokenize(raw_text, output);
  7329. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  7330. output.push_back(fragment.token);
  7331. }
  7332. }
  7333. } break;
  7334. }
  7335. return output;
  7336. }
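// Usage sketch (illustrative): on a typical SPM vocab with add_space_prefix,
//   llama_tokenize_internal(vocab, "Hello world", /*bos=*/true, /*special=*/false)
// returns the BOS id followed by the ids of the pieces for "\xe2\x96\x81Hello"
// and "\xe2\x96\x81world" (the escaped form of " Hello world").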
  7337. //
  7338. // grammar - internal
  7339. //
  7340. struct llama_partial_utf8 {
  7341. uint32_t value; // bit value so far (unshifted)
  7342. int n_remain; // num bytes remaining; -1 indicates invalid sequence
  7343. };
  7344. struct llama_grammar {
  7345. const std::vector<std::vector<llama_grammar_element>> rules;
  7346. std::vector<std::vector<const llama_grammar_element *>> stacks;
  7347. // buffer for partially generated UTF-8 sequence from accepted tokens
  7348. llama_partial_utf8 partial_utf8;
  7349. };
  7350. struct llama_grammar_candidate {
  7351. size_t index;
  7352. const uint32_t * code_points;
  7353. llama_partial_utf8 partial_utf8;
  7354. };
  7355. // Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 for use as
  7356. // pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
  7357. static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
  7358. const std::string & src,
  7359. llama_partial_utf8 partial_start) {
  7360. static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
  7361. const char * pos = src.c_str();
  7362. std::vector<uint32_t> code_points;
7363. // common English strings have the same number of codepoints and bytes. `+ 1` for the terminating 0.
  7364. code_points.reserve(src.size() + 1);
  7365. uint32_t value = partial_start.value;
  7366. int n_remain = partial_start.n_remain;
  7367. // continue previous decode, if applicable
  7368. while (*pos != 0 && n_remain > 0) {
  7369. uint8_t next_byte = static_cast<uint8_t>(*pos);
  7370. if ((next_byte >> 6) != 2) {
  7371. // invalid sequence, abort
  7372. code_points.push_back(0);
  7373. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
  7374. }
  7375. value = (value << 6) + (next_byte & 0x3F);
  7376. ++pos;
  7377. --n_remain;
  7378. }
  7379. if (partial_start.n_remain > 0 && n_remain == 0) {
  7380. code_points.push_back(value);
  7381. }
  7382. // decode any subsequent utf-8 sequences, which may end in an incomplete one
  7383. while (*pos != 0) {
  7384. uint8_t first_byte = static_cast<uint8_t>(*pos);
  7385. uint8_t highbits = first_byte >> 4;
  7386. n_remain = lookup[highbits] - 1;
  7387. if (n_remain < 0) {
  7388. // invalid sequence, abort
  7389. code_points.clear();
  7390. code_points.push_back(0);
  7391. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
  7392. }
  7393. uint8_t mask = (1 << (7 - n_remain)) - 1;
  7394. value = first_byte & mask;
  7395. ++pos;
  7396. while (*pos != 0 && n_remain > 0) {
  7397. value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
  7398. ++pos;
  7399. --n_remain;
  7400. }
  7401. if (n_remain == 0) {
  7402. code_points.push_back(value);
  7403. }
  7404. }
  7405. code_points.push_back(0);
  7406. return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
  7407. }
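// Example: decoding "\xe2\x82\xac" (U+20AC, the euro sign) split across two calls:
//   auto r1 = decode_utf8("\xe2\x82", {0, 0}); // r1.second == { 0x82, 1 }: one byte still missing
//   auto r2 = decode_utf8("\xac", r1.second);  // r2.first  == { 0x20AC, 0 }: completed codepoint + terminating 0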
  7408. // returns true iff pos points to the end of one of the definitions of a rule
  7409. static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
  7410. switch (pos->type) {
  7411. case LLAMA_GRETYPE_END: return true; // NOLINT
  7412. case LLAMA_GRETYPE_ALT: return true; // NOLINT
  7413. default: return false;
  7414. }
  7415. }
  7416. // returns true iff chr satisfies the char range at pos (regular or inverse range)
  7417. // asserts that pos is pointing to a char range element
  7418. static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
  7419. const llama_grammar_element * pos,
  7420. const uint32_t chr) {
  7421. bool found = false;
  7422. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  7423. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT
  7424. do {
  7425. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  7426. // inclusive range, e.g. [a-z]
  7427. found = found || (pos->value <= chr && chr <= pos[1].value);
  7428. pos += 2;
  7429. } else {
  7430. // exact char match, e.g. [a] or "a"
  7431. found = found || pos->value == chr;
  7432. pos += 1;
  7433. }
  7434. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  7435. return std::make_pair(found == is_positive_char, pos);
  7436. }
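// For example, the char class [a-zA] is encoded as the element sequence
//   { LLAMA_GRETYPE_CHAR,           'a' }
//   { LLAMA_GRETYPE_CHAR_RNG_UPPER, 'z' }  // upper bound of the a-z range
//   { LLAMA_GRETYPE_CHAR_ALT,       'A' }  // extra exact-char alternative
// followed by the next element (e.g. LLAMA_GRETYPE_END); matching 'q' against
// it with llama_grammar_match_char() above returns { true, pos + 3 }.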
  7437. // returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
  7438. // range at pos (regular or inverse range)
  7439. // asserts that pos is pointing to a char range element
  7440. static bool llama_grammar_match_partial_char(
  7441. const llama_grammar_element * pos,
  7442. const llama_partial_utf8 partial_utf8) {
  7443. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  7444. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  7445. uint32_t partial_value = partial_utf8.value;
  7446. int n_remain = partial_utf8.n_remain;
  7447. // invalid sequence or 7-bit char split across 2 bytes (overlong)
  7448. if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
  7449. return false;
  7450. }
  7451. // range of possible code points this partial UTF-8 sequence could complete to
  7452. uint32_t low = partial_value << (n_remain * 6);
  7453. uint32_t high = low | ((1 << (n_remain * 6)) - 1);
  7454. if (low == 0) {
  7455. if (n_remain == 2) {
  7456. low = 1 << 11;
  7457. } else if (n_remain == 3) {
  7458. low = 1 << 16;
  7459. }
  7460. }
  7461. do {
  7462. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  7463. // inclusive range, e.g. [a-z]
  7464. if (pos->value <= high && low <= pos[1].value) {
  7465. return is_positive_char;
  7466. }
  7467. pos += 2;
  7468. } else {
  7469. // exact char match, e.g. [a] or "a"
  7470. if (low <= pos->value && pos->value <= high) {
  7471. return is_positive_char;
  7472. }
  7473. pos += 1;
  7474. }
  7475. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  7476. return !is_positive_char;
  7477. }
  7478. // transforms a grammar pushdown stack into N possible stacks, all ending
  7479. // at a character range (terminal element)
  7480. static void llama_grammar_advance_stack(
  7481. const std::vector<std::vector<llama_grammar_element>> & rules,
  7482. const std::vector<const llama_grammar_element *> & stack,
  7483. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  7484. if (stack.empty()) {
  7485. new_stacks.emplace_back(stack);
  7486. return;
  7487. }
  7488. const llama_grammar_element * pos = stack.back();
  7489. switch (pos->type) {
  7490. case LLAMA_GRETYPE_RULE_REF: {
  7491. const size_t rule_id = static_cast<size_t>(pos->value);
  7492. const llama_grammar_element * subpos = rules[rule_id].data();
  7493. do {
  7494. // init new stack without the top (pos)
  7495. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  7496. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  7497. // if this rule ref is followed by another element, add that to stack
  7498. new_stack.push_back(pos + 1);
  7499. }
  7500. if (!llama_grammar_is_end_of_sequence(subpos)) {
  7501. // if alternate is nonempty, add to stack
  7502. new_stack.push_back(subpos);
  7503. }
  7504. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  7505. while (!llama_grammar_is_end_of_sequence(subpos)) {
  7506. // scan to end of alternate def
  7507. subpos++;
  7508. }
  7509. if (subpos->type == LLAMA_GRETYPE_ALT) {
  7510. // there's another alternate def of this rule to process
  7511. subpos++;
  7512. } else {
  7513. break;
  7514. }
  7515. } while (true);
  7516. break;
  7517. }
  7518. case LLAMA_GRETYPE_CHAR:
  7519. case LLAMA_GRETYPE_CHAR_NOT:
  7520. new_stacks.emplace_back(stack);
  7521. break;
  7522. default:
  7523. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  7524. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  7525. // those
  7526. GGML_ASSERT(false);
  7527. }
  7528. }
  7529. // takes a set of possible pushdown stacks on a grammar, which are required to
  7530. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  7531. // produces the N possible stacks if the given char is accepted at those
  7532. // positions
  7533. static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
  7534. const std::vector<std::vector<llama_grammar_element>> & rules,
  7535. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  7536. const uint32_t chr) {
  7537. std::vector<std::vector<const llama_grammar_element *>> new_stacks;
  7538. for (const auto & stack : stacks) {
  7539. if (stack.empty()) {
  7540. continue;
  7541. }
  7542. auto match = llama_grammar_match_char(stack.back(), chr);
  7543. if (match.first) {
  7544. const llama_grammar_element * pos = match.second;
  7545. // update top of stack to next element, if any
  7546. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  7547. if (!llama_grammar_is_end_of_sequence(pos)) {
  7548. new_stack.push_back(pos);
  7549. }
  7550. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  7551. }
  7552. }
  7553. return new_stacks;
  7554. }
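// For example, for the rule root ::= "a" "b" | "c" there are two initial
// stacks, positioned at 'a' and at 'c'. llama_grammar_accept() with the char
// 'a' drops the second stack and advances the first one to 'b'; accepting 'x'
// instead returns no stacks at all, i.e. the text is rejected.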
  7555. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  7556. const std::vector<std::vector<llama_grammar_element>> & rules,
  7557. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  7558. const std::vector<llama_grammar_candidate> & candidates);
  7559. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  7560. const std::vector<std::vector<llama_grammar_element>> & rules,
  7561. const std::vector<const llama_grammar_element *> & stack,
  7562. const std::vector<llama_grammar_candidate> & candidates) {
  7563. std::vector<llama_grammar_candidate> rejects;
  7564. if (stack.empty()) {
  7565. for (const auto & tok : candidates) {
  7566. if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
  7567. rejects.push_back(tok);
  7568. }
  7569. }
  7570. return rejects;
  7571. }
  7572. const llama_grammar_element * stack_pos = stack.back();
  7573. std::vector<llama_grammar_candidate> next_candidates;
  7574. for (const auto & tok : candidates) {
  7575. if (*tok.code_points == 0) {
  7576. // reached end of full codepoints in token, reject iff it ended in a partial sequence
  7577. // that cannot satisfy this position in grammar
  7578. if (tok.partial_utf8.n_remain != 0 &&
  7579. !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
  7580. rejects.push_back(tok);
  7581. }
  7582. } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
  7583. next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
  7584. } else {
  7585. rejects.push_back(tok);
  7586. }
  7587. }
  7588. const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  7589. // update top of stack to next element, if any
  7590. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  7591. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  7592. stack_after.push_back(stack_pos_after);
  7593. }
  7594. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  7595. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  7596. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  7597. for (const auto & tok : next_rejects) {
  7598. rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
  7599. }
  7600. return rejects;
  7601. }
  7602. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  7603. const std::vector<std::vector<llama_grammar_element>> & rules,
  7604. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  7605. const std::vector<llama_grammar_candidate> & candidates) {
  7606. GGML_ASSERT(!stacks.empty()); // REVIEW
  7607. if (candidates.empty()) {
  7608. return std::vector<llama_grammar_candidate>();
  7609. }
  7610. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  7611. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  7612. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  7613. }
  7614. return rejects;
  7615. }
  7616. //
  7617. // grammar - external
  7618. //
  7619. struct llama_grammar * llama_grammar_init(
  7620. const llama_grammar_element ** rules,
  7621. size_t n_rules,
  7622. size_t start_rule_index) {
  7623. const llama_grammar_element * pos;
  7624. // copy rule definitions into vectors
  7625. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  7626. for (size_t i = 0; i < n_rules; i++) {
  7627. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  7628. vec_rules[i].push_back(*pos);
  7629. }
  7630. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  7631. }
  7632. // loop over alternates of start rule to build initial stacks
  7633. std::vector<std::vector<const llama_grammar_element *>> stacks;
  7634. pos = rules[start_rule_index];
  7635. do {
  7636. std::vector<const llama_grammar_element *> stack;
  7637. if (!llama_grammar_is_end_of_sequence(pos)) {
  7638. // if alternate is nonempty, add to stack
  7639. stack.push_back(pos);
  7640. }
  7641. llama_grammar_advance_stack(vec_rules, stack, stacks);
  7642. while (!llama_grammar_is_end_of_sequence(pos)) {
  7643. // scan to end of alternate def
  7644. pos++;
  7645. }
  7646. if (pos->type == LLAMA_GRETYPE_ALT) {
  7647. // there's another alternate def of this rule to process
  7648. pos++;
  7649. } else {
  7650. break;
  7651. }
  7652. } while (true);
  7653. return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
  7654. }
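// Usage sketch (illustrative): building a one-rule grammar, root ::= "a" | "b".
// The element arrays are static because the initial stacks keep pointers into
// the caller-provided rules (see the loop above).
static struct llama_grammar * llama_grammar_example_ab(void) {
    static const llama_grammar_element rule0[] = {
        { LLAMA_GRETYPE_CHAR, 'a' },
        { LLAMA_GRETYPE_ALT,  0   }, // end of the first alternate
        { LLAMA_GRETYPE_CHAR, 'b' },
        { LLAMA_GRETYPE_END,  0   }, // end of the rule
    };
    static const llama_grammar_element * rules[] = { rule0 };
    return llama_grammar_init(rules, 1, 0); // free with llama_grammar_free()
}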
  7655. void llama_grammar_free(struct llama_grammar * grammar) {
  7656. delete grammar;
  7657. }
  7658. struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
  7659. llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };
  7660. // redirect elements in stacks to point to new rules
  7661. for (size_t is = 0; is < result->stacks.size(); is++) {
  7662. for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
  7663. for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
  7664. for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
  7665. if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
  7666. result->stacks[is][ie] = &result->rules[ir0][ir1];
  7667. }
  7668. }
  7669. }
  7670. }
  7671. }
  7672. return result;
  7673. }
  7674. //
  7675. // sampling
  7676. //
  7677. void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
  7678. if (seed == LLAMA_DEFAULT_SEED) {
  7679. seed = time(NULL);
  7680. }
  7681. ctx->rng.seed(seed);
  7682. }
  7683. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  7684. GGML_ASSERT(candidates->size > 0);
  7685. const int64_t t_start_sample_us = ggml_time_us();
  7686. // Sort the logits in descending order
  7687. if (!candidates->sorted) {
  7688. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  7689. return a.logit > b.logit;
  7690. });
  7691. candidates->sorted = true;
  7692. }
  7693. float max_l = candidates->data[0].logit;
  7694. float cum_sum = 0.0f;
  7695. for (size_t i = 0; i < candidates->size; ++i) {
  7696. float p = expf(candidates->data[i].logit - max_l);
  7697. candidates->data[i].p = p;
  7698. cum_sum += p;
  7699. }
  7700. for (size_t i = 0; i < candidates->size; ++i) {
  7701. candidates->data[i].p /= cum_sum;
  7702. }
  7703. if (ctx) {
  7704. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7705. }
  7706. }
  7707. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int32_t k, size_t min_keep) {
  7708. // TODO: move bucket sort to separate function so that top_p/tail_free/typical/softmax first is equally fast
  7709. // if (k >= (int32_t)candidates->size) {
  7710. // return;
  7711. // }
  7712. const int64_t t_start_sample_us = ggml_time_us();
  7713. if (k <= 0) {
  7714. k = candidates->size;
  7715. }
  7716. k = std::max(k, (int) min_keep);
  7717. k = std::min(k, (int) candidates->size);
  7718. // Sort scores in descending order
  7719. if (!candidates->sorted) {
  7720. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  7721. return a.logit > b.logit;
  7722. };
  7723. if (k <= 128) {
  7724. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  7725. } else {
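// bucket sort: histogram the logits into 128 fixed-width buckets spanning
// [-10, 10], take whole buckets from the highest down until at least k
// tokens are collected, sort only those, and copy the top k back --
// much cheaper than a partial_sort over the entire vocabulary for large k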
  7726. constexpr int nbuckets = 128;
  7727. constexpr float bucket_low = -10.0f;
  7728. constexpr float bucket_high = 10.0f;
  7729. constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low);
7730. constexpr float bucket_inter = -bucket_low * bucket_scale;
  7731. std::vector<int> bucket_idx(candidates->size);
  7732. std::vector<int> histo(nbuckets, 0);
  7733. for (int i = 0; i < (int)candidates->size; ++i) {
  7734. const float val = candidates->data[i].logit;
7735. int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
  7736. ib = std::max(0, std::min(nbuckets-1, ib));
  7737. bucket_idx[i] = ib;
  7738. ++histo[ib];
  7739. }
  7740. int nhave = 0;
  7741. int ib = nbuckets - 1;
  7742. for ( ; ib >= 0; --ib) {
  7743. nhave += histo[ib];
  7744. if (nhave >= k) break;
  7745. }
  7746. std::vector<llama_token_data> tmp_tokens(nhave);
  7747. auto ptr = tmp_tokens.data();
  7748. std::vector<llama_token_data*> bucket_ptrs;
  7749. bucket_ptrs.reserve(nbuckets - ib);
  7750. for (int j = nbuckets - 1; j >= ib; --j) {
  7751. bucket_ptrs.push_back(ptr);
  7752. ptr += histo[j];
  7753. }
  7754. for (int i = 0; i < (int)candidates->size; ++i) {
  7755. int j = bucket_idx[i];
  7756. if (j >= ib) {
  7757. *bucket_ptrs[nbuckets-1-j]++ = candidates->data[i];
  7758. }
  7759. }
  7760. ptr = tmp_tokens.data();
  7761. int ndone = 0;
  7762. for (int j = nbuckets-1; j > ib; --j) {
  7763. std::sort(ptr, ptr + histo[j], comp);
  7764. ptr += histo[j];
  7765. ndone += histo[j];
  7766. }
  7767. std::partial_sort(ptr, ptr + k - ndone, ptr + histo[ib], comp);
  7768. std::memcpy(candidates->data, tmp_tokens.data(), k*sizeof(llama_token_data));
  7769. }
  7770. candidates->sorted = true;
  7771. }
  7772. candidates->size = k;
  7773. if (ctx) {
  7774. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7775. }
  7776. }
  7777. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  7778. if (p >= 1.0f) {
  7779. return;
  7780. }
  7781. llama_sample_softmax(ctx, candidates);
  7782. const int64_t t_start_sample_us = ggml_time_us();
  7783. // Compute the cumulative probabilities
  7784. float cum_sum = 0.0f;
  7785. size_t last_idx = candidates->size;
  7786. for (size_t i = 0; i < candidates->size; ++i) {
  7787. cum_sum += candidates->data[i].p;
7788. // Check if the running sum is at least p and we have kept at least min_keep tokens
7789. // we set the last index to i+1 to indicate that the current token should be included in the set
  7790. if (cum_sum >= p && i + 1 >= min_keep) {
  7791. last_idx = i + 1;
  7792. break;
  7793. }
  7794. }
  7795. // Resize the output vector to keep only the top-p tokens
  7796. candidates->size = last_idx;
  7797. if (ctx) {
  7798. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7799. }
  7800. }
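// Usage sketch (illustrative, not called anywhere): how these samplers are
// typically chained -- filter with top-k/top-p, renormalize with softmax,
// then draw from the surviving candidates. The 40/0.95 values are just
// common defaults, not anything this file prescribes.
static llama_token llama_sample_example(struct llama_context * ctx, const float * logits, int n_vocab, std::mt19937 & rng) {
    std::vector<llama_token_data> cur;
    cur.reserve(n_vocab);
    for (llama_token id = 0; id < n_vocab; id++) {
        cur.push_back({ id, logits[id], 0.0f });
    }
    llama_token_data_array candidates = { cur.data(), cur.size(), false };
    llama_sample_top_k(ctx, &candidates, 40, 1);
    llama_sample_top_p(ctx, &candidates, 0.95f, 1);
    llama_sample_softmax(ctx, &candidates); // renormalize what survived the filters
    std::vector<float> probs(candidates.size);
    for (size_t i = 0; i < candidates.size; i++) {
        probs[i] = candidates.data[i].p;
    }
    std::discrete_distribution<int> dist(probs.begin(), probs.end());
    return candidates.data[dist(rng)].id;
}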

void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
    if (p <= 0.0f || !candidates->size) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    bool min_p_applied = false;

    // if the candidates aren't sorted, try the unsorted implementation first
    if (!candidates->sorted) {
        std::vector<llama_token_data> filtered_tokens;

        float max_logit = -FLT_MAX;
        for (size_t i = 0; i < candidates->size; ++i) {
            max_logit = std::max(max_logit, candidates->data[i].logit);
        }
        const float min_logit = max_logit + logf(p); // min logit for p_i >= p * p_max

        for (size_t i = 0; i < candidates->size; ++i) {
            if (candidates->data[i].logit >= min_logit) {
                filtered_tokens.push_back(candidates->data[i]);
            }
        }

        // if we have enough values the operation was a success
        if (filtered_tokens.size() >= min_keep) {
            memcpy(candidates->data, filtered_tokens.data(), filtered_tokens.size()*sizeof(llama_token_data));
            candidates->size = filtered_tokens.size();
            min_p_applied = true;
        }
    }

    // if the candidates are sorted or the unsorted implementation failed, use this implementation
    if (!min_p_applied) {
        // Sort the logits in descending order
        if (!candidates->sorted) {
            std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
                return a.logit > b.logit;
            });
            candidates->sorted = true;
        }

        const float min_logit = candidates->data[0].logit + logf(p); // min logit for p_i >= p * p_max
        size_t i = 1; // first token always matches

        for (; i < candidates->size; ++i) {
            if (candidates->data[i].logit < min_logit && i >= min_keep) {
                break; // prob too small
            }
        }

        // Resize the output vector to keep only the matching tokens
        candidates->size = i;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
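
// Why the min-p threshold can stay in logit space (a sketch, following the
// code above): softmax is monotone, so p_i >= p * p_max is equivalent to
//
//   logit_i >= logit_max + log(p)
//
// e.g. with p = 0.05, any token whose logit is within log(0.05) ~= -3.0 of the
// best logit survives, and no softmax normalization is needed to test it.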

void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
    if (z >= 1.0f || candidates->size <= 2) {
        return;
    }

    llama_sample_softmax(nullptr, candidates);
    const int64_t t_start_sample_us = ggml_time_us();

    // Compute the first and second derivatives
    std::vector<float> first_derivatives(candidates->size - 1);
    std::vector<float> second_derivatives(candidates->size - 2);

    for (size_t i = 0; i < first_derivatives.size(); ++i) {
        first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
    }
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
    }

    // Calculate absolute value of second derivatives
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        second_derivatives[i] = std::abs(second_derivatives[i]);
    }

    // Normalize the second derivatives
    {
        const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);

        if (second_derivatives_sum > 1e-6f) {
            for (float & value : second_derivatives) {
                value /= second_derivatives_sum;
            }
        } else {
            for (float & value : second_derivatives) {
                value = 1.0f / second_derivatives.size();
            }
        }
    }

    float cum_sum = 0.0f;
    size_t last_idx = candidates->size;
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        cum_sum += second_derivatives[i];

        // Check if the running sum is greater than z or if we have kept at least min_keep tokens
        if (cum_sum > z && i >= min_keep) {
            last_idx = i;
            break;
        }
    }

    // Resize the output vector to keep only the tokens above the tail location
    candidates->size = last_idx;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
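
// Worked example of the discrete derivatives above (illustrative numbers):
// sorted probabilities p = {0.50, 0.25, 0.15, 0.07, 0.03} give
//
//   first  derivatives: {0.25, 0.10, 0.08, 0.04}
//   second derivatives: {0.15, 0.02, 0.04} (abs), sum = 0.21
//   normalized:         {0.714, 0.095, 0.190}
//
// so with z = 0.9 the running sum first exceeds z at index 2, and everything
// past the curvature "knee" of the distribution is dropped.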

void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
    // Reference implementation:
    // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
    if (p >= 1.0f) {
        return;
    }

    // Compute the softmax of logits and calculate entropy
    llama_sample_softmax(nullptr, candidates);

    const int64_t t_start_sample_us = ggml_time_us();

    float entropy = 0.0f;
    for (size_t i = 0; i < candidates->size; ++i) {
        entropy += -candidates->data[i].p * logf(candidates->data[i].p);
    }

    // Compute the absolute difference between negative log probability and entropy for each candidate
    std::vector<float> shifted_scores;
    for (size_t i = 0; i < candidates->size; ++i) {
        float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
        shifted_scores.push_back(shifted_score);
    }

    // Sort tokens based on the shifted_scores and their corresponding indices
    std::vector<size_t> indices(candidates->size);
    std::iota(indices.begin(), indices.end(), 0);

    std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
        return shifted_scores[a] < shifted_scores[b];
    });

    // Compute the cumulative probabilities
    float cum_sum = 0.0f;
    size_t last_idx = indices.size();

    for (size_t i = 0; i < indices.size(); ++i) {
        size_t idx = indices[i];
        cum_sum += candidates->data[idx].p;

        // Check if the running sum is greater than typical or if we have kept at least min_keep tokens
        if (cum_sum > p && i >= min_keep - 1) {
            last_idx = i + 1;
            break;
        }
    }

    // Resize the output vector to keep only the locally typical tokens
    std::vector<llama_token_data> new_candidates;
    for (size_t i = 0; i < last_idx; ++i) {
        size_t idx = indices[i];
        new_candidates.push_back(candidates->data[idx]);
    }

    // Replace the data in candidates with the new_candidates data
    std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
    candidates->size = new_candidates.size();
    candidates->sorted = false;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
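
// Intuition for the shifted score (a sketch of the math the loop above
// implements): each candidate's surprisal is -log p_i, and tokens are ranked
// by |(-log p_i) - H|, where H = -sum_i p_i log p_i is the entropy of the
// whole distribution. Locally typical sampling therefore keeps the tokens
// whose surprisal is closest to the entropy until their mass reaches p,
// rather than simply the most probable ones.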

void llama_sample_entropy(struct llama_context * ctx, llama_token_data_array * candidates_p, float min_temp, float max_temp, float exponent_val) {
    const int64_t t_start_sample_us = ggml_time_us();

    // no need to do anything if there is only one (or zero) candidate
    if (candidates_p->size <= 1) {
        return;
    }

    // Calculate maximum possible entropy
    float max_entropy = -logf(1.0f / candidates_p->size);

    llama_sample_softmax(nullptr, candidates_p);

    // Calculate entropy of the softmax probabilities
    float entropy = 0.0f;
    for (size_t i = 0; i < candidates_p->size; ++i) {
        float prob = candidates_p->data[i].p;
        if (prob > 0.0f) { // Ensure no log(0)
            entropy -= prob * logf(prob);
        }
    }

    // Normalize the entropy (max_entropy cannot be 0 here because we checked candidates_p->size != 1 above)
    float normalized_entropy = entropy / max_entropy;

    // Map the normalized entropy to the desired temperature range using the power function
    float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val);

#ifdef DEBUG
    LLAMA_LOG_INFO("Your text maxtemp value is: %f\n", max_temp);
    LLAMA_LOG_INFO("Entropy: %f\n", entropy);
    LLAMA_LOG_INFO("Max Possible Entropy: %f\n", max_entropy);
    LLAMA_LOG_INFO("Normalized Entropy: %f\n", normalized_entropy);
    LLAMA_LOG_INFO("Exponent: %f\n", exponent_val);
    LLAMA_LOG_INFO("Dynamic Temperature (dyn_temp): %f\n", dyn_temp);
#endif

    // Apply the dynamically calculated temperature scaling
    for (size_t i = 0; i < candidates_p->size; ++i) {
        candidates_p->data[i].logit /= dyn_temp;
    }

    // Re-compute softmax probabilities after scaling logits with dynamic temperature
    double max_l_double = candidates_p->data[0].logit;
    double cum_sum_double = 0.0;
    for (size_t i = 0; i < candidates_p->size; ++i) {
        double p = exp(candidates_p->data[i].logit - max_l_double);
        candidates_p->data[i].p = p; // Store the scaled probability
        cum_sum_double += p;
    }
    for (size_t i = 0; i < candidates_p->size; ++i) {
        candidates_p->data[i].p /= cum_sum_double; // Re-normalize the probabilities
    }

#ifdef DEBUG
    // Print the updated top 25 probabilities after temperature scaling
    LLAMA_LOG_INFO("\nUpdated Top 25 Probabilities After Dynamic Temperature Scaling (in percentages):\n");
    for (size_t i = 0; i < 25 && i < candidates_p->size; ++i) {
        LLAMA_LOG_INFO("Token %zu: %f%%\n", i + 1, candidates_p->data[i].p * 100.0f);
    }
#endif

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
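
// Numeric sketch of the mapping above: with min_temp = 0.0, max_temp = 2.0,
// exponent_val = 1.0 and a distribution at half of its maximum entropy
// (normalized_entropy = 0.5), dyn_temp = 0.0 + 2.0 * 0.5^1.0 = 1.0. A nearly
// deterministic distribution (normalized_entropy -> 0) is sampled almost
// greedily, while a flat one is pushed toward max_temp.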

void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
    const int64_t t_start_sample_us = ggml_time_us();

    for (size_t i = 0; i < candidates_p->size; ++i) {
        candidates_p->data[i].logit /= temp;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

// kept as an alias of llama_sample_temp for backwards compatibility
void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
    llama_sample_temp(ctx, candidates_p, temp);
}

void llama_sample_repetition_penalties(
        struct llama_context * ctx,
        llama_token_data_array * candidates,
        const llama_token * last_tokens,
        size_t penalty_last_n,
        float penalty_repeat,
        float penalty_freq,
        float penalty_present) {
    if (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    // Create a frequency map to count occurrences of each token in last_tokens
    std::unordered_map<llama_token, int> token_count;
    for (size_t i = 0; i < penalty_last_n; ++i) {
        token_count[last_tokens[i]]++;
    }

    // Apply frequency and presence penalties to the candidates
    for (size_t i = 0; i < candidates->size; ++i) {
        const auto token_iter = token_count.find(candidates->data[i].id);
        if (token_iter == token_count.end()) {
            continue;
        }

        const int count = token_iter->second;

        // The academic publication that described this technique only divided by the penalty,
        // but that would make tokens with negative logits more likely, which is obviously wrong.
        // The common fix is to multiply by the penalty instead whenever the logit is negative.
        if (candidates->data[i].logit <= 0) {
            candidates->data[i].logit *= penalty_repeat;
        } else {
            candidates->data[i].logit /= penalty_repeat;
        }

        candidates->data[i].logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;
    }

    candidates->sorted = false;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
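
// Worked example of the penalty math above (illustrative values): with
// penalty_repeat = 1.1, penalty_freq = 0.2, penalty_present = 0.5 and a token
// seen count = 3 times in the window,
//
//   logit  2.0 ->  2.0 / 1.1 - (3*0.2 + 0.5) =  0.718
//   logit -2.0 -> -2.0 * 1.1 - (3*0.2 + 0.5) = -3.3
//
// both signs move the token away from being sampled again, which is why the
// sign split above matters.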

void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
    GGML_ASSERT(ctx);
    const int64_t t_start_sample_us = ggml_time_us();

    bool allow_eos = false;
    for (const auto & stack : grammar->stacks) {
        if (stack.empty()) {
            allow_eos = true;
            break;
        }
    }

    const llama_token eos = llama_token_eos(&ctx->model);

    std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
    candidates_decoded.reserve(candidates->size);
    std::vector<llama_grammar_candidate> candidates_grammar;
    candidates_grammar.reserve(candidates->size);

    for (size_t i = 0; i < candidates->size; ++i) {
        const llama_token id = candidates->data[i].id;
        const std::string piece = llama_token_to_piece(ctx, id);
        if (id == eos) {
            if (!allow_eos) {
                candidates->data[i].logit = -INFINITY;
            }
        } else if (piece.empty() || piece[0] == 0) {
            candidates->data[i].logit = -INFINITY;
        } else {
            candidates_decoded.push_back(decode_utf8(piece, grammar->partial_utf8));
            candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
        }
    }

    const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
    for (const auto & reject : rejects) {
        candidates->data[reject.index].logit = -INFINITY;
    }

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}

static void llama_log_softmax(float * array, size_t size) {
    float max_l = *std::max_element(array, array + size);
    float sum = 0.f;
    for (size_t i = 0; i < size; ++i) {
        float p = expf(array[i] - max_l);
        sum += p;
        array[i] = p;
    }

    for (size_t i = 0; i < size; ++i) {
        array[i] = logf(array[i] / sum);
    }
}
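
// The helper above computes the numerically stable identity
//
//   log_softmax(x)_i = (x_i - max_j x_j) - log(sum_j exp(x_j - max_j x_j))
//
// subtracting the maximum first so that expf() never overflows for large
// logits; the result overwrites the input array in place.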

void llama_sample_apply_guidance(
        struct llama_context * ctx,
        float * logits,
        float * logits_guidance,
        float scale) {
    GGML_ASSERT(ctx);

    const auto t_start_sample_us = ggml_time_us();
    const auto n_vocab = llama_n_vocab(llama_get_model(ctx));

    llama_log_softmax(logits, n_vocab);
    llama_log_softmax(logits_guidance, n_vocab);

    for (int i = 0; i < n_vocab; ++i) {
        auto & l = logits[i];
        const auto & g = logits_guidance[i];

        l = scale * (l - g) + g;
    }

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}
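
// Note on the update above: l = g + scale * (l - g) linearly blends the two
// log-distributions. scale = 1 leaves the main logits unchanged, scale = 0
// returns the guidance logits, and scale > 1 extrapolates away from the
// guidance distribution, which is the regime that strengthens the
// conditioning in classifier-free guidance.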

void llama_sample_classifier_free_guidance(
        struct llama_context * ctx,
        llama_token_data_array * candidates,
        struct llama_context * guidance_ctx,
        float scale) {
    GGML_ASSERT(ctx);

    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();
    const size_t n_vocab = llama_n_vocab(llama_get_model(ctx));

    GGML_ASSERT(n_vocab == candidates->size);
    GGML_ASSERT(!candidates->sorted);

    std::vector<float> logits_base(n_vocab);
    for (size_t i = 0; i < n_vocab; ++i) {
        logits_base[i] = candidates->data[i].logit;
    }

    float * logits_guidance = llama_get_logits(guidance_ctx);

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    llama_sample_apply_guidance(ctx, logits_base.data(), logits_guidance, scale);
    t_start_sample_us = ggml_time_us();

    for (size_t i = 0; i < n_vocab; ++i) {
        candidates->data[i].logit = logits_base[i];
    }

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}

llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int32_t m, float * mu) {
    GGML_ASSERT(ctx);

    auto N = float(llama_n_vocab(llama_get_model(ctx)));
    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();

    llama_sample_softmax(nullptr, candidates);

    // Estimate s_hat using the most probable m tokens
    float s_hat = 0.0;
    float sum_ti_bi = 0.0;
    float sum_ti_sq = 0.0;
    for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
        float t_i = logf(float(i + 2) / float(i + 1));
        float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
        sum_ti_bi += t_i * b_i;
        sum_ti_sq += t_i * t_i;
    }
    s_hat = sum_ti_bi / sum_ti_sq;

    // Compute k from the estimated s_hat and target surprise value
    float epsilon_hat = s_hat - 1;
    float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);

    // Sample the next word X using top-k sampling
    llama_sample_top_k(nullptr, candidates, int(k), 1);
    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
    llama_token X = llama_sample_token(ctx, candidates);
    t_start_sample_us = ggml_time_us();

    // Compute error as the difference between observed surprise and target surprise value
    size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
        return candidate.id == X;
    }));
    float observed_surprise = -log2f(candidates->data[X_idx].p);
    float e = observed_surprise - tau;

    // Update mu using the learning rate and error
    *mu = *mu - eta * e;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
    return X;
}
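
// Background for the s_hat estimate above (a sketch following the Mirostat
// paper): assuming a Zipfian distribution p_i ~ i^(-s), the ratio of adjacent
// sorted probabilities satisfies log(p_i / p_{i+1}) = s * log((i+1)/i), so s
// is recovered by least squares over the top m tokens:
//
//   s_hat = sum_i(t_i * b_i) / sum_i(t_i^2),  t_i = log((i+2)/(i+1)),
//                                             b_i = log(p_i / p_{i+1})
//
// k is then chosen so that the expected surprise of top-k sampling matches
// the current target *mu.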

llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();

    llama_sample_softmax(ctx, candidates);

    // Truncate the words with surprise values greater than mu
    candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
        return -log2f(candidate.p) > *mu;
    }));

    if (candidates->size == 0) {
        candidates->size = 1;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }

    // Normalize the probabilities of the remaining words
    llama_sample_softmax(ctx, candidates);

    // Sample the next word X from the remaining words
    llama_token X = llama_sample_token(ctx, candidates);
    t_start_sample_us = ggml_time_us();

    // Compute error as the difference between observed surprise and target surprise value
    size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
        return candidate.id == X;
    }));
    float observed_surprise = -log2f(candidates->data[X_idx].p);
    float e = observed_surprise - tau;

    // Update mu using the learning rate and error
    *mu = *mu - eta * e;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
    return X;
}
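
// Usage sketch (illustrative, not part of the library): mirostat keeps its
// state in the caller-owned `mu`, conventionally initialized to 2 * tau.
//
//   float tau = 5.0f, eta = 0.1f;
//   float mu  = 2.0f * tau;                   // persists across tokens
//   for (;;) {
//       llama_token_data_array arr = /* fresh candidates each step */;
//       llama_token tok = llama_sample_token_mirostat_v2(ctx, &arr, tau, eta, &mu);
//       // ... feed tok back into the model, stop on EOS, etc.
//   }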

llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
    const int64_t t_start_sample_us = ggml_time_us();

    // Find max element
    auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
        return a.logit < b.logit;
    });

    llama_token result = max_iter->id;
    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
        ctx->n_sample++;
    }
    return result;
}

llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
    GGML_ASSERT(ctx);

    const int64_t t_start_sample_us = ggml_time_us();
    llama_sample_softmax(nullptr, candidates);

    std::vector<float> probs;
    probs.reserve(candidates->size);
    for (size_t i = 0; i < candidates->size; ++i) {
        probs.push_back(candidates->data[i].p);
    }

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    auto & rng = ctx->rng;
    int idx = dist(rng);

    llama_token result = candidates->data[idx].id;

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    ctx->n_sample++;
    return result;
}

void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
    const int64_t t_start_sample_us = ggml_time_us();

    if (token == llama_token_eos(&ctx->model)) {
        for (const auto & stack : grammar->stacks) {
            if (stack.empty()) {
                return;
            }
        }
        GGML_ASSERT(false);
    }

    const std::string piece = llama_token_to_piece(ctx, token);

    // Note terminating 0 in decoded string
    const auto decoded = decode_utf8(piece, grammar->partial_utf8);
    const auto & code_points = decoded.first;
    for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
        grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
    }
    grammar->partial_utf8 = decoded.second;
    GGML_ASSERT(!grammar->stacks.empty());

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}

//
// Beam search
//

struct llama_beam {
    std::vector<llama_token> tokens;
    float p;  // Cumulative beam probability (renormalized relative to all beams)
    bool eob; // Initialize end-of-beam to false. Callback sets this to true.
    // Sort beams by probability. In case of ties, prefer beams at eob.
    bool operator<(const llama_beam & rhs) const {
        return std::make_pair(p, eob) < std::make_pair(rhs.p, rhs.eob);
    }
    // Shift off first n tokens and discard them.
    void shift_tokens(const size_t n) {
        if (n) {
            std::copy(tokens.begin() + n, tokens.end(), tokens.begin());
            tokens.resize(tokens.size() - n);
        }
    }
    llama_beam_view view() const { return {tokens.data(), tokens.size(), p, eob}; }
};

// A struct for calculating logit-related info.
struct llama_logit_info {
    const float * const logits;
    const int n_vocab;
    const float max_l;
    const float normalizer;
    struct sum_exp {
        float max_l;
        float operator()(float sum, float l) const { return sum + std::exp(l - max_l); }
    };
    llama_logit_info(llama_context * ctx)
      : logits(llama_get_logits(ctx))
      , n_vocab(llama_n_vocab(llama_get_model(ctx)))
      , max_l(*std::max_element(logits, logits + n_vocab))
      , normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
      { }
    llama_token_data get_token_data(const llama_token token_id) const {
        constexpr auto p = std::numeric_limits<float>::quiet_NaN(); // never used
        return {token_id, logits[token_id], p};
    }
    // Return top k token_data by logit.
    std::vector<llama_token_data> top_k(size_t k) {
        std::vector<llama_token_data> min_heap; // min-heap by logit
        const llama_token k_min = std::min(static_cast<llama_token>(k), n_vocab);
        min_heap.reserve(k_min);
        for (llama_token token_id = 0 ; token_id < k_min ; ++token_id) {
            min_heap.push_back(get_token_data(token_id));
        }
        auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; };
        std::make_heap(min_heap.begin(), min_heap.end(), comp);
        for (llama_token token_id = k_min ; token_id < n_vocab ; ++token_id) {
            if (min_heap.front().logit < logits[token_id]) {
                std::pop_heap(min_heap.begin(), min_heap.end(), comp);
                min_heap.back().id    = token_id;
                min_heap.back().logit = logits[token_id];
                std::push_heap(min_heap.begin(), min_heap.end(), comp);
            }
        }
        return min_heap;
    }
    float probability_from_logit(float logit) const {
        return normalizer * std::exp(logit - max_l);
    }
};

struct llama_beam_search_data {
    llama_context * ctx;
    size_t n_beams;
    int n_past;
    int n_predict;
    std::vector<llama_beam> beams;
    std::vector<llama_beam> next_beams;

    // Re-calculated on each loop iteration
    size_t common_prefix_length;

    // Used to communicate to/from callback on beams state.
    std::vector<llama_beam_view> beam_views;

    llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict)
      : ctx(ctx)
      , n_beams(n_beams)
      , n_past(n_past)
      , n_predict(n_predict)
      , beam_views(n_beams) {
        beams.reserve(n_beams);
        next_beams.reserve(n_beams);
    }

    // Collapse beams to a single beam given by index.
    void collapse_beams(const size_t beam_idx) {
        if (0u < beam_idx) {
            std::swap(beams[0], beams[beam_idx]);
        }
        beams.resize(1);
    }

    // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
    // The repetitive patterns below reflect the 2 stages of heaps:
    //  * Gather elements until the vector is full, then call std::make_heap() on it.
    //  * If the heap is full and a new element is found that should be included, pop the
    //    least element to the back(), replace it with the new, then push it into the heap.
    void fill_next_beams_by_top_probabilities(llama_beam & beam) {
        // Min-heaps use a greater-than comparator.
        const auto comp = [](const llama_beam & a, const llama_beam & b) { return a.p > b.p; };
        if (beam.eob) {
            // beam is at end-of-sentence, so just copy it to next_beams if its probability is high enough.
            if (next_beams.size() < n_beams) {
                next_beams.push_back(std::move(beam));
                if (next_beams.size() == n_beams) {
                    std::make_heap(next_beams.begin(), next_beams.end(), comp);
                }
            } else if (next_beams.front().p < beam.p) {
                std::pop_heap(next_beams.begin(), next_beams.end(), comp);
                next_beams.back() = std::move(beam);
                std::push_heap(next_beams.begin(), next_beams.end(), comp);
            }
        } else {
            // beam is not at end-of-sentence, so branch with next top_k tokens.
            if (!beam.tokens.empty()) {
                llama_decode(ctx, llama_batch_get_one(beam.tokens.data(), beam.tokens.size(), n_past, 0));
            }
            llama_logit_info logit_info(ctx);
            std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);
            size_t i=0;
            if (next_beams.size() < n_beams) {
                for (; next_beams.size() < n_beams ; ++i) {
                    llama_beam next_beam = beam;
                    next_beam.tokens.push_back(next_tokens[i].id);
                    next_beam.p *= logit_info.probability_from_logit(next_tokens[i].logit);
                    next_beams.push_back(std::move(next_beam));
                }
                std::make_heap(next_beams.begin(), next_beams.end(), comp);
            } else {
                for (; next_beams.front().p == 0.0f ; ++i) {
                    std::pop_heap(next_beams.begin(), next_beams.end(), comp);
                    next_beams.back() = beam;
                    next_beams.back().tokens.push_back(next_tokens[i].id);
                    next_beams.back().p *= logit_info.probability_from_logit(next_tokens[i].logit);
                    std::push_heap(next_beams.begin(), next_beams.end(), comp);
                }
            }
            for (; i < n_beams ; ++i) {
                const float next_p = beam.p * logit_info.probability_from_logit(next_tokens[i].logit);
                if (next_beams.front().p < next_p) {
                    std::pop_heap(next_beams.begin(), next_beams.end(), comp);
                    next_beams.back() = beam;
                    next_beams.back().tokens.push_back(next_tokens[i].id);
                    next_beams.back().p = next_p;
                    std::push_heap(next_beams.begin(), next_beams.end(), comp);
                }
            }
        }
    }

    // Find common_prefix_length based on beams.
    // Requires beams is not empty.
    size_t find_common_prefix_length() {
        size_t common_prefix_length = beams[0].tokens.size();
        for (size_t i = 1 ; i < beams.size() ; ++i) {
            common_prefix_length = std::min(common_prefix_length, beams[i].tokens.size());
            for (size_t j = 0 ; j < common_prefix_length ; ++j) {
                if (beams[0].tokens[j] != beams[i].tokens[j]) {
                    common_prefix_length = j;
                    break;
                }
            }
        }
        return common_prefix_length;
    }

    // Construct beams_state to send back to caller via the callback function.
    // Side effect: set common_prefix_length = find_common_prefix_length();
    llama_beams_state get_beams_state(const bool last_call) {
        for (size_t i = 0 ; i < beams.size() ; ++i) {
            beam_views[i] = beams[i].view();
        }
        common_prefix_length = find_common_prefix_length();
        return {beam_views.data(), beams.size(), common_prefix_length, last_call};
    }

    // Loop:
    //  * while i < n_predict, AND
    //  * any of the beams have not yet reached end-of-beam (eob), AND
    //  * the highest probability beam(s) (plural in case of ties) are not at end-of-sentence
    //    (since all other beam probabilities can only decrease)
    void loop(const llama_beam_search_callback_fn_t callback, void * const callback_data) {
        beams.push_back({{}, 1.0f, false}); // Start with one empty beam w/ probability = 1.0 and !eob.
        const auto not_eob = [](const llama_beam & beam) { return !beam.eob; };
        for (int i = 0 ; i < n_predict && std::any_of(beams.begin(),beams.end(),not_eob) &&
                       !beams[top_beam_index()].eob ; ++i) {
            callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
            update_beams_from_beam_views();                  // Update values (p,eob) that callback may have changed.
            if (common_prefix_length) {
                llama_decode(ctx, llama_batch_get_one(beams[0].tokens.data(), common_prefix_length, n_past, 0));
                n_past += common_prefix_length;
            }
            // Zero-out next_beam probabilities to place them last in following min-heap.
            std::for_each(next_beams.begin(), next_beams.end(), [](llama_beam & beam) { beam.p = 0.0f; });
            for (llama_beam & beam : beams) {
                beam.shift_tokens(common_prefix_length);
                fill_next_beams_by_top_probabilities(beam);
            }
            // next_beams become the beams of next/final iteration. Swap them to re-use memory.
            beams.swap(next_beams);
            renormalize_beam_probabilities(beams);
        }
        collapse_beams(top_beam_index());
        callback(callback_data, get_beams_state(true));
    }

    // As beams grow, the cumulative probabilities decrease.
    // Renormalize them to avoid floating point underflow.
    static void renormalize_beam_probabilities(std::vector<llama_beam> & beams) {
        const auto sum_p = [](float sum, llama_beam & beam) { return sum + beam.p; };
        const float inv_sum = 1.0f / std::accumulate(beams.begin(), beams.end(), 0.0f, sum_p);
        std::for_each(beams.begin(), beams.end(), [=](llama_beam & beam) { beam.p *= inv_sum; });
    }

    // Assumes beams is non-empty. Uses llama_beam::operator<() for ordering.
    size_t top_beam_index() {
        return std::max_element(beams.begin(), beams.end()) - beams.begin();
    }

    // Copy (p,eob) for each beam which may have been changed by the callback.
    void update_beams_from_beam_views() {
        for (size_t i = 0 ; i < beams.size() ; ++i) {
            beams[i].p   = beam_views[i].p;
            beams[i].eob = beam_views[i].eob;
        }
    }
};

void llama_beam_search(llama_context * ctx,
                       llama_beam_search_callback_fn_t callback, void * callback_data,
                       size_t n_beams, int n_past, int n_predict) {
    assert(ctx);
    const int64_t t_start_sample_us = ggml_time_us();

    llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict);

    beam_search_data.loop(callback, callback_data);

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    ctx->n_sample++;
}
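
// Callback usage sketch (illustrative only; the field names follow the
// llama_beams_state / llama_beam_view construction above, and `my_eos_token`
// is hypothetical). The callback may mark a beam as ended by setting eob:
//
//   static void my_beam_callback(void * data, llama_beams_state state) {
//       for (size_t i = 0; i < state.n_beams; ++i) {
//           llama_beam_view & bv = state.beam_views[i];
//           if (!bv.eob && bv.n_tokens > 0 &&
//               bv.tokens[bv.n_tokens - 1] == my_eos_token) {
//               bv.eob = true;
//           }
//       }
//       // tokens[0 .. state.common_prefix_length) are shared by all beams
//       // and can be flushed to the output at this point.
//   }
//
//   llama_beam_search(ctx, my_beam_callback, /*callback_data=*/nullptr,
//                     /*n_beams=*/4, n_past, /*n_predict=*/64);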

//
// quantization
//

struct quantize_state_internal {
    const llama_model                 & model;
    const llama_model_quantize_params * params;

    int n_attention_wv = 0;
    int n_ffn_down     = 0;
    int n_ffn_gate     = 0;
    int n_ffn_up       = 0;
    int i_attention_wv = 0;
    int i_ffn_down     = 0;
    int i_ffn_gate     = 0;
    int i_ffn_up       = 0;

    int n_k_quantized = 0;
    int n_fallback    = 0;

    bool has_imatrix = false;

    quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
        : model(model)
        , params(params)
        {}
};

static void llama_convert_tensor_internal(
    struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
    const size_t nelements, const int nthread
) {
    if (output.size() < nelements) {
        output.resize(nelements);
    }
    float * f32_output = (float *) output.data();

    ggml_type_traits_t qtype;
    if (ggml_is_quantized(tensor->type)) {
        qtype = ggml_internal_get_type_traits(tensor->type);
        if (qtype.to_float == NULL) {
            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
        }
    } else if (tensor->type != GGML_TYPE_F16) {
        throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
    }

    if (nthread < 2) {
        if (tensor->type == GGML_TYPE_F16) {
            ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
        } else if (ggml_is_quantized(tensor->type)) {
            qtype.to_float(tensor->data, f32_output, nelements);
        } else {
            GGML_ASSERT(false); // unreachable
        }
        return;
    }

    size_t block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
    size_t block_size_bytes = ggml_type_size(tensor->type);

    GGML_ASSERT(nelements % block_size == 0);
    size_t nblocks = nelements / block_size;
    size_t blocks_per_thread = nblocks / nthread;
    size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count

    size_t in_buff_offs = 0;
    size_t out_buff_offs = 0;

    for (int tnum = 0; tnum < nthread; tnum++) {
        size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
        size_t thr_elems = thr_blocks * block_size;             // number of elements for this thread
        size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread

        auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
            if (typ == GGML_TYPE_F16) {
                ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
            } else {
                qtype.to_float(inbuf, outbuf, nels);
            }
        };
        workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
        in_buff_offs += thr_block_bytes;
        out_buff_offs += thr_elems;
    }
    for (auto & w : workers) { w.join(); }
    workers.clear();
}
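
// Partitioning sketch for the loop above (illustrative numbers): a tensor of
// nblocks = 10 quantized blocks dequantized with nthread = 3 gives
// blocks_per_thread = 3 and spare_blocks = 1, so the threads process 3, 3 and
// 4 blocks respectively. The last thread always absorbs the remainder, so
// every block lands on exactly one thread.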

static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
    const std::string name = ggml_get_name(tensor);

    // TODO: avoid hardcoded tensor names - use the TN_* constants
    const llm_arch arch = qs.model.arch;
    const auto       tn = LLM_TN(arch);

    auto use_more_bits = [](int i_layer, int num_layers) -> bool {
        return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
    };
    const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
    auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
        if (n_expert > 1) {
            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
            // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
            // for getting the current layer as I initially thought, and we need to resort to parsing the
            // tensor name.
            n_layer /= n_expert;
            if (sscanf(name, "blk.%d.", &i_layer) != 1) {
                throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
            }
            if (i_layer < 0 || i_layer >= n_layer) {
                throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
            }
        }
        return std::make_pair(i_layer, n_layer);
    };

    if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
        int nx = tensor->ne[0];
        if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
            new_type = GGML_TYPE_Q8_0;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
            new_type = GGML_TYPE_Q5_K;
        }
        else if (new_type != GGML_TYPE_Q8_0) {
            new_type = GGML_TYPE_Q6_K;
        }
    } else if (name == "token_embd.weight") {
        if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
            new_type = GGML_TYPE_Q2_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
            new_type = GGML_TYPE_Q4_K;
        }
    } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
        if (name.find("attn_v.weight") != std::string::npos) {
            if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
            else new_type = GGML_TYPE_Q2_K;
            ++qs.i_attention_wv;
        }
        else if (name.find("ffn_down") != std::string::npos) {
            if (qs.i_ffn_down < qs.n_ffn_down/8) new_type = GGML_TYPE_Q2_K;
            ++qs.i_ffn_down;
        }
        else if (name.find("attn_output.weight") != std::string::npos) {
            if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) new_type = GGML_TYPE_IQ2_XXS;
        }
    } else if (name.find("attn_v.weight") != std::string::npos) {
        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
            new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_Q3_K : GGML_TYPE_IQ3_XXS;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
            new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
        else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
                (qs.i_attention_wv < qs.n_attention_wv/8 || qs.i_attention_wv >= 7*qs.n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
        if (qs.model.type == MODEL_70B) {
            // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
            // nearly negligible increase in model size by quantizing this tensor with more bits:
            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
        }
        if (qs.model.hparams.n_expert == 8) {
            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
            // TODO: explore better strategies
            new_type = GGML_TYPE_Q8_0;
        }
        ++qs.i_attention_wv;
    } else if (name.find("attn_k.weight") != std::string::npos) {
        if (qs.model.hparams.n_expert == 8) {
            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
            // TODO: explore better strategies
            new_type = GGML_TYPE_Q8_0;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS) {
            new_type = GGML_TYPE_Q2_K;
        }
    } else if (name.find("ffn_down") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
        int i_layer = info.first, n_layer = info.second;
        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS) {
            if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
            new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
            new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
                     : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
                     : GGML_TYPE_Q3_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
            new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
            if (arch == LLM_ARCH_FALCON) {
                new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
                           use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
            } else {
                if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
            }
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
            new_type = GGML_TYPE_Q5_K;
        }
        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
                && qs.has_imatrix && i_layer < n_layer/8) {
            // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
            // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
            // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
            new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
        }
        ++qs.i_ffn_down;
    } else if (name.find("attn_output.weight") != std::string::npos) {
        if (arch != LLM_ARCH_FALCON) {
            if (qs.model.hparams.n_expert == 8) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                    ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ||
                    ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
                    new_type = GGML_TYPE_Q5_K;
                }
            } else {
                if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   ) new_type = GGML_TYPE_Q3_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_Q3_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
            }
        } else {
            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
        }
    }
    else if (name.find("attn_qkv.weight") != std::string::npos) {
        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
    }
    else if (name.find("ffn_gate") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
        int i_layer = info.first, n_layer = info.second;
        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS && !use_more_bits(i_layer, n_layer)) {
            new_type = GGML_TYPE_Q2_K;
        }
        ++qs.i_ffn_gate;
    }
    else if (name.find("ffn_up") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
        int i_layer = info.first, n_layer = info.second;
        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_XS && !use_more_bits(i_layer, n_layer)) {
            new_type = GGML_TYPE_Q2_K;
        }
        ++qs.i_ffn_up;
    }

    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
    //}
    // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
    //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
    //}
    // This can be used to reduce the size of the Q5_K_S model.
    // The associated PPL increase is fully in line with the size reduction
    //else {
    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
    //}
    bool convert_incompatible_tensor = false;
    if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
        new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K ||
        new_type == GGML_TYPE_IQ2_XS || new_type == GGML_TYPE_IQ2_XXS ||
        new_type == GGML_TYPE_IQ3_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
        int nx = tensor->ne[0];
        int ny = tensor->ne[1];
        if (nx % QK_K != 0) {
            LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
            convert_incompatible_tensor = true;
        } else {
            ++qs.n_k_quantized;
        }
    }
    if (convert_incompatible_tensor) {
        switch (new_type) {
            case GGML_TYPE_IQ2_XXS:
            case GGML_TYPE_IQ2_XS:
            case GGML_TYPE_IQ3_XXS:
            case GGML_TYPE_IQ1_S:
            case GGML_TYPE_Q2_K: new_type = GGML_TYPE_Q4_0; break;
            case GGML_TYPE_Q3_K: new_type = GGML_TYPE_Q4_1; break;
            case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
            case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
            case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
            default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
        }
        LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
        ++qs.n_fallback;
    }
    return new_type;
}
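
// Example of the use_more_bits() schedule above (illustrative): for
// num_layers = 32 it selects layers 0-3 (the first eighth), 28-31 (the last
// eighth), and every third layer in between (6, 9, ..., 27), so the ends of
// the network plus a sparse middle sample get the higher-bit variant.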

static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
    ggml_type quantized_type;
    llama_ftype ftype = params->ftype;

    switch (params->ftype) {
        case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
        case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
        case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
        case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
        case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
        case LLAMA_FTYPE_MOSTLY_F16:  quantized_type = GGML_TYPE_F16;  break;
        case LLAMA_FTYPE_ALL_F32:     quantized_type = GGML_TYPE_F32;  break;

        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K_S:
        case LLAMA_FTYPE_MOSTLY_Q2_K:    quantized_type = GGML_TYPE_Q2_K;    break;
        case LLAMA_FTYPE_MOSTLY_Q3_K_XS:
        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
        case LLAMA_FTYPE_MOSTLY_Q3_K_L:  quantized_type = GGML_TYPE_Q3_K;    break;
        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
        case LLAMA_FTYPE_MOSTLY_Q4_K_M:  quantized_type = GGML_TYPE_Q4_K;    break;
        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  quantized_type = GGML_TYPE_Q5_K;    break;
        case LLAMA_FTYPE_MOSTLY_Q6_K:    quantized_type = GGML_TYPE_Q6_K;    break;
        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: quantized_type = GGML_TYPE_IQ2_XXS; break;
        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  quantized_type = GGML_TYPE_IQ2_XS;  break;
        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: quantized_type = GGML_TYPE_IQ3_XXS; break;
        case LLAMA_FTYPE_MOSTLY_IQ1_S:   quantized_type = GGML_TYPE_IQ1_S;   break;

        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
    }

    int nthread = params->nthread;

    if (nthread <= 0) {
        nthread = std::thread::hardware_concurrency();
    }

    // mmap consistently increases speed on Linux, and also increases speed on Windows with
    // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
#if defined(__linux__) || defined(_WIN32)
    constexpr bool use_mmap = true;
#else
    constexpr bool use_mmap = false;
#endif

    llama_model_loader ml(fname_inp, use_mmap, NULL);
    ml.init_mapping(false); // no prefetching

    llama_model model;
    llm_load_arch(ml, model);
    llm_load_hparams(ml, model);

    struct quantize_state_internal qs(model, params);

    if (params->only_copy) {
        ftype = model.ftype;
    }

    const std::unordered_map<std::string, std::vector<float>> * imatrix_data = nullptr;
    if (params->imatrix) {
        imatrix_data = static_cast<const std::unordered_map<std::string, std::vector<float>>*>(params->imatrix);
        if (imatrix_data) {
            LLAMA_LOG_INFO("================================ Have weights data with %d entries\n", int(imatrix_data->size()));
            qs.has_imatrix = true;
        }
    }

    const size_t align = GGUF_DEFAULT_ALIGNMENT;
    struct gguf_context * ctx_out = gguf_init_empty();

    // copy the KV pairs from the input file
    gguf_set_kv     (ctx_out, ml.ctx_gguf);
    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
    gguf_set_val_u32(ctx_out, "general.file_type", ftype);

    for (int i = 0; i < ml.n_tensors; ++i) {
        struct ggml_tensor * meta = ml.get_tensor_meta(i);

        const std::string name = ggml_get_name(meta);

        // TODO: avoid hardcoded tensor names - use the TN_* constants
        if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
            ++qs.n_attention_wv;
        }
        else if (name.find("ffn_down") != std::string::npos) {
            ++qs.n_ffn_down;
        }
        else if (name.find("ffn_gate") != std::string::npos) {
            ++qs.n_ffn_gate;
        }
        else if (name.find("ffn_up") != std::string::npos) {
            ++qs.n_ffn_up;
        }
    }
    if (qs.n_attention_wv != qs.n_ffn_down || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) {
        LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_ffn_down = %d, hparams.n_layer = %d\n",
                __func__, qs.n_attention_wv, qs.n_ffn_down, model.hparams.n_layer);
    }

    size_t total_size_org = 0;
    size_t total_size_new = 0;
    std::vector<int64_t> hist_all(1 << 4, 0);

    std::vector<std::thread> workers;
    workers.reserve(nthread);
    std::mutex mutex;

    int idx = 0;

    std::vector<no_init<uint8_t>> read_data;
    std::vector<no_init<uint8_t>> work;
    std::vector<no_init<float>> f32_conv_buf;

    // populate the original tensors so we get an initial meta data
    for (int i = 0; i < ml.n_tensors; ++i) {
        struct ggml_tensor * meta = ml.get_tensor_meta(i);
        gguf_add_tensor(ctx_out, meta);
    }

    std::ofstream fout(fname_out, std::ios::binary);
    fout.exceptions(std::ofstream::failbit); // fail fast on write errors

    const size_t meta_size = gguf_get_meta_size(ctx_out);

    LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size);

    // placeholder for the meta data
    ::zeros(fout, meta_size);

    for (int i = 0; i < ml.n_tensors; ++i) {
        struct ggml_tensor * tensor = ml.get_tensor_meta(i);

        const std::string name = ggml_get_name(tensor);

        if (!ml.use_mmap) {
            if (read_data.size() < ggml_nbytes(tensor)) {
                read_data.resize(ggml_nbytes(tensor));
            }
            tensor->data = read_data.data();
        }
        ml.load_data_for(tensor);

        LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
               ++idx, ml.n_tensors,
               ggml_get_name(tensor),
               llama_format_tensor_shape(tensor).c_str(),
               ggml_type_name(tensor->type));

        // This used to be a regex, but <regex> has an extreme cost to compile times.
        bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?

        // quantize only 2D tensors
        quantize &= (ggml_n_dims(tensor) == 2);
        quantize &= params->quantize_output_tensor || name != "output.weight";
        quantize &= !params->only_copy;

        // do not quantize expert gating tensors
        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");

        // do not quantize positional embeddings and token types (BERT)
        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;

        if (quantize) {
            new_type = quantized_type;
            if (!params->pure) {
                new_type = get_k_quant_type(qs, new_type, tensor, ftype);
            }

            // If we've decided to quantize to the same type the tensor is already
            // in then there's nothing to do.
            quantize = tensor->type != new_type;
        }

        if (!quantize) {
            new_type = tensor->type;
            new_data = tensor->data;
            new_size = ggml_nbytes(tensor);
            LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
        } else {
            const size_t nelements = ggml_nelements(tensor);

            const float * imatrix = nullptr;
            if (imatrix_data) {
                auto it = imatrix_data->find(tensor->name);
                if (it == imatrix_data->end()) {
                    LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
                } else {
                    if (it->second.size() == (size_t)tensor->ne[0]) {
                        imatrix = it->second.data();
                    } else {
                        LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
                                int(it->second.size()), int(tensor->ne[0]), tensor->name);
                    }
                }
            }
            if ((new_type == GGML_TYPE_IQ2_XXS ||
                 new_type == GGML_TYPE_IQ2_XS  ||
                 new_type == GGML_TYPE_IQ1_S   ||
                (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
                LLAMA_LOG_ERROR("\n\n============================================================\n");
                LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
                LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
                LLAMA_LOG_ERROR("============================================================\n\n");
                throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
            }

            float * f32_data;

            if (tensor->type == GGML_TYPE_F32) {
                f32_data = (float *) tensor->data;
            } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
            } else {
                llama_convert_tensor_internal(tensor, f32_conv_buf, workers, nelements, nthread);
                f32_data = (float *) f32_conv_buf.data();
            }

            LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
            fflush(stdout);

            if (work.size() < nelements * 4) {
                work.resize(nelements * 4); // upper bound on size
            }
            new_data = work.data();
            std::array<int64_t, 1 << 4> hist_cur = {};

            const int n_per_row = tensor->ne[0];
            const int nrows = nelements / n_per_row;

            static const int min_chunk_size = 32 * 512;
            const int chunk_size = n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row);

            const int nchunk = (nelements + chunk_size - 1)/chunk_size;
            const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
            if (nthread_use < 2) {
                new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, hist_cur.data(), imatrix);
            } else {
                int counter = 0;
                new_size = 0;
                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, chunk_size,
                     nrows, n_per_row, imatrix]() {
                    std::array<int64_t, 1 << 4> local_hist = {};
                    const int nrows_per_chunk = chunk_size / n_per_row;
                    size_t local_size = 0;
                    while (true) {
                        std::unique_lock<std::mutex> lock(mutex);
                        int first_row = counter; counter += nrows_per_chunk;
                        if (first_row >= nrows) {
                            if (local_size > 0) {
                                for (int j=0; j<int(local_hist.size()); ++j) {
                                    hist_cur[j] += local_hist[j];
                                }
                                new_size += local_size;
                            }
                            break;
                        }
                        lock.unlock();
                        const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
                        local_size += ggml_quantize_chunk(new_type, f32_data, new_data,
                                first_row * n_per_row, this_nrow, n_per_row, local_hist.data(), imatrix);
                    }
                };
                for (int it = 0; it < nthread_use - 1; ++it) {
                    workers.emplace_back(compute);
                }
                compute();
                for (auto & w : workers) { w.join(); }
                workers.clear();
            }

            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
            int64_t tot_count = 0;
            for (size_t i = 0; i < hist_cur.size(); i++) {
                hist_all[i] += hist_cur[i];
                tot_count += hist_cur[i];
            }

            if (tot_count > 0) {
                LLAMA_LOG_INFO(" | hist: ");
                for (size_t i = 0; i < hist_cur.size(); i++) {
                    LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
                }
            }
            LLAMA_LOG_INFO("\n");
        }
        total_size_org += ggml_nbytes(tensor);
        total_size_new += new_size;

        // update the gguf meta data as we go
        gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
        gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);

        // write tensor data + padding
        fout.write((const char *) new_data, new_size);
        zeros(fout, GGML_PAD(new_size, align) - new_size);
    }
  9040. // go back to beginning of file and write the updated meta data
  9041. {
  9042. fout.seekp(0);
  9043. std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
  9044. gguf_get_meta_data(ctx_out, data.data());
  9045. fout.write((const char *) data.data(), data.size());
  9046. }
  9047. fout.close();
  9048. gguf_free(ctx_out);
  9049. LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
  9050. LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
  9051. // print histogram for all tensors
  9052. {
  9053. int64_t sum_all = 0;
  9054. for (size_t i = 0; i < hist_all.size(); i++) {
  9055. sum_all += hist_all[i];
  9056. }
  9057. if (sum_all > 0) {
  9058. LLAMA_LOG_INFO("%s: hist: ", __func__);
  9059. for (size_t i = 0; i < hist_all.size(); i++) {
  9060. LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
  9061. }
  9062. LLAMA_LOG_INFO("\n");
  9063. }
  9064. }
  9065. if (qs.n_fallback > 0) {
  9066. LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n",
  9067. __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
  9068. }
  9069. }
  9070. static int llama_apply_lora_from_file_internal(
  9071. const struct llama_model & model, const char * path_lora, float scale, const char * path_base_model, int n_threads
  9072. ) {
  9073. LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
  9074. const int64_t t_start_lora_us = ggml_time_us();
  9075. llama_file fin(path_lora, "rb");
  9076. // verify magic and version
  9077. {
  9078. uint32_t magic = fin.read_u32();
  9079. if (magic != LLAMA_FILE_MAGIC_GGLA) {
  9080. LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
  9081. return 1;
  9082. }
  9083. uint32_t format_version = fin.read_u32();
  9084. if (format_version != 1) {
  9085. LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
  9086. return 1;
  9087. }
  9088. }
  9089. int32_t lora_r = fin.read_u32();
  9090. int32_t lora_alpha = fin.read_u32();
  9091. float scaling = scale * (float)lora_alpha / (float)lora_r;
  9092. LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
    // load base model
    std::unique_ptr<llama_model_loader> ml;
    if (path_base_model) {
        LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
        ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr));
        ml->init_mapping(/*prefetch*/ false); // no prefetching
    }

    struct tensor_meta {
        std::string name;
        ggml_type type;
        int32_t ne[2];
        size_t offset;
    };
    std::map<std::string, tensor_meta> tensor_meta_map;
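    // the GGLA file stores each tensor as a [n_dims, name_len, ftype] header (3 x int32),
    // followed by n_dims x int32 dimensions, the raw name bytes, and the tensor data aligned
    // to a 32-byte boundary; the loop below only records the metadata and data offsets so
    // the tensors can be loaded lazily while walking the model weights further down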
    // load all tensor meta
    while (true) {
        if (fin.tell() == fin.size) {
            // eof
            break;
        }

        int32_t n_dims;
        int32_t name_len;
        int32_t ftype;

        fin.read_raw(&n_dims, sizeof(n_dims));
        fin.read_raw(&name_len, sizeof(name_len));
        fin.read_raw(&ftype, sizeof(ftype));

        if (n_dims != 1 && n_dims != 2) {
            LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
            return 1;
        }

        int32_t ne[2] = { 1, 1 };
        for (int i = 0; i < n_dims; ++i) {
            fin.read_raw(&ne[i], sizeof(ne[i]));
        }

        std::string name;
        {
            GGML_ASSERT(name_len < GGML_MAX_NAME);
            char buf[GGML_MAX_NAME];
            fin.read_raw(buf, name_len);
            name = std::string(buf, name_len);
        }

        // check for lora suffix
        std::string lora_suffix;
        if (name.length() > 6) {
            lora_suffix = name.substr(name.length() - 6);
        }
        if (lora_suffix != ".loraA" && lora_suffix != ".loraB") {
            LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
            return 1;
        }

        // tensor type
        ggml_type wtype;
        switch (ftype) {
            case 0: wtype = GGML_TYPE_F32;  break;
            case 1: wtype = GGML_TYPE_F16;  break;
            default:
                    {
                        LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
                                __func__, ftype);
                        return 1;
                    }
        }

        // data offset
        size_t offset = fin.tell();
        offset = (offset + 31) & -32;

        // skip tensor data
        fin.seek(offset + ggml_row_size(wtype, ne[0]) * ne[1], SEEK_SET);

        tensor_meta_map.emplace(name, tensor_meta{ name, wtype, { ne[0], ne[1] }, offset });
    }
    bool warned = false;
    int n_tensors = 0;

    // apply
    ggml_backend_t backend_cpu = ggml_backend_cpu_init();
    if (backend_cpu == nullptr) {
        LLAMA_LOG_ERROR("%s: error: failed to initialize cpu backend\n", __func__);
        return 1;
    }
    ggml_backend_cpu_set_n_threads(backend_cpu, n_threads);

    std::vector<no_init<uint8_t>> read_buf;
    for (const auto & it : model.tensors_by_name) {
        const std::string & base_name = it.first;
        ggml_tensor * model_t = it.second;

        if (tensor_meta_map.find(base_name + ".loraA") == tensor_meta_map.end() ||
            tensor_meta_map.find(base_name + ".loraB") == tensor_meta_map.end()) {
            continue;
        }

        tensor_meta & metaA = tensor_meta_map.at(base_name + ".loraA");
        tensor_meta & metaB = tensor_meta_map.at(base_name + ".loraB");

        ggml_init_params lora_init_params = {
            /* .mem_size   */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
            /* .mem_buffer */ nullptr,
            /* .no_alloc   */ true,
        };
        ggml_context * lora_ctx = ggml_init(lora_init_params);
        if (lora_ctx == nullptr) {
            LLAMA_LOG_ERROR("%s: error: failed to initialize lora context\n", __func__);
            ggml_backend_free(backend_cpu);
            return 1;
        }

        // create tensors
        ggml_tensor * loraA = ggml_new_tensor_2d(lora_ctx, metaA.type, metaA.ne[0], metaA.ne[1]);
        ggml_tensor * loraB = ggml_new_tensor_2d(lora_ctx, metaB.type, metaB.ne[0], metaB.ne[1]);
        ggml_set_name(loraA, metaA.name.c_str());
        ggml_set_name(loraB, metaB.name.c_str());

        ggml_tensor * base_t;
        if (ml) {
            if (gguf_find_tensor(ml->ctx_gguf, base_name.c_str()) < 0) {
                LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
                ggml_free(lora_ctx);            // release the lora context before bailing out (was leaked)
                ggml_backend_free(backend_cpu); // release the CPU backend as well
                return 1;
            }
            base_t = ggml_dup_tensor(lora_ctx, ml->get_tensor_meta(base_name.c_str()));
        } else {
            base_t = ggml_dup_tensor(lora_ctx, model_t);
        }
        ggml_set_name(base_t, base_name.c_str());

        // allocate in backend buffer
        ggml_backend_buffer_t lora_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type());
        if (lora_buf == nullptr) {
            LLAMA_LOG_ERROR("%s: error: failed to allocate lora tensors\n", __func__);
            ggml_free(lora_ctx);            // avoid leaking the context on allocation failure
            ggml_backend_free(backend_cpu);
            return 1;
        }
        // load tensor data
        auto load_tensor = [&read_buf, &fin](const tensor_meta & tensor_meta, ggml_tensor * tensor) {
            read_buf.resize(ggml_nbytes(tensor));
            fin.seek(tensor_meta.offset, SEEK_SET);
            fin.read_raw(read_buf.data(), ggml_nbytes(tensor));
            ggml_backend_tensor_set(tensor, read_buf.data(), 0, read_buf.size());
        };
        load_tensor(metaA, loraA);
        load_tensor(metaB, loraB);

        // load base model tensor data
        if (ml) {
            ml->load_data_for(base_t);
        } else {
            ggml_backend_tensor_copy(model_t, base_t);
        }

        if (ggml_is_quantized(base_t->type) && !warned) {
            LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
                           "use a f16 or f32 base model with --lora-base\n", __func__);
            warned = true;
        }

        if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
            LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
                            " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
            ggml_free(lora_ctx);
            ggml_backend_buffer_free(lora_buf);
            ggml_backend_free(backend_cpu);
            return 1;
        }

        auto build_lora_graph = [&]() {
            // w = w + BA*s
            ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
            ggml_set_name(BA, "BA");

            if (scaling != 1.0f) {
                BA = ggml_scale(lora_ctx, BA, scaling);
                ggml_set_name(BA, "BA_scaled");
            }

            ggml_tensor * r;
            r = ggml_add_inplace(lora_ctx, base_t, BA);
            ggml_set_name(r, "r_add");

            if (base_t->type != model_t->type) {
                // convert the result to the model type
                r = ggml_cast(lora_ctx, r, model_t->type);
                ggml_set_name(r, "r_cast");
            }

            return r;
        };

        ggml_cgraph * gf = ggml_new_graph(lora_ctx);
        ggml_tensor * r = build_lora_graph();
        ggml_build_forward_expand(gf, r);

        ggml_backend_buffer_t graph_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type());
        if (graph_buf == nullptr) {
            LLAMA_LOG_ERROR("%s: error: failed to allocate graph tensors\n", __func__);
            ggml_free(lora_ctx);
            ggml_backend_buffer_free(lora_buf);
            ggml_backend_free(backend_cpu);
            return 1;
        }

        ggml_backend_graph_compute(backend_cpu, gf);

        ggml_backend_tensor_set(model_t, r->data, 0, ggml_nbytes(r));
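        // note: the result is written straight back into the model weight, so applying an
        // adapter mutates the loaded model in place - there is no way to "un-apply" it short
        // of reloading the model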
#if 0
        // TODO: use scheduler with fallback to CPU for less copies between CPU and GPU
        //ggml_backend_sched_t sched = ggml_backend_sched_new(backends.data(), backends.size(), GGML_DEFAULT_GRAPH_SIZE);

        // sched compute
        ggml_build_forward_expand(gf, build_graph());
        ggml_backend_sched_init_measure(sched, gf);

        // create the graph again, since the previous one was destroyed by the measure
        ggml_graph_clear(gf);
        ggml_build_forward_expand(gf, build_graph());
        ggml_backend_sched_graph_compute(sched, gf);
        ggml_backend_sched_free(sched);
#endif

        ggml_backend_buffer_free(lora_buf);
        ggml_backend_buffer_free(graph_buf);
        ggml_free(lora_ctx);

        n_tensors++;
        if (n_tensors % 4 == 0) {
            LLAMA_LOG_INFO(".");
        }
    }

    ggml_backend_free(backend_cpu);

    const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
    LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);

    return 0;
}
//
// interface implementation
//

struct llama_model_params llama_model_default_params() {
    struct llama_model_params result = {
        /*.n_gpu_layers                =*/ 0,
        /*.split_mode                  =*/ LLAMA_SPLIT_LAYER,
        /*.main_gpu                    =*/ 0,
        /*.tensor_split                =*/ nullptr,
        /*.progress_callback           =*/ nullptr,
        /*.progress_callback_user_data =*/ nullptr,
        /*.kv_overrides                =*/ nullptr,
        /*.vocab_only                  =*/ false,
        /*.use_mmap                    =*/ true,
        /*.use_mlock                   =*/ false,
    };

#ifdef GGML_USE_METAL
    // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
    result.n_gpu_layers = 999;
#endif

    return result;
}

struct llama_context_params llama_context_default_params() {
    struct llama_context_params result = {
        /*.seed                        =*/ LLAMA_DEFAULT_SEED,
        /*.n_ctx                       =*/ 512,
        /*.n_batch                     =*/ 512,
        /*.n_threads                   =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
        /*.n_threads_batch             =*/ GGML_DEFAULT_N_THREADS,
        /*.rope_scaling_type           =*/ LLAMA_ROPE_SCALING_UNSPECIFIED,
        /*.rope_freq_base              =*/ 0.0f,
        /*.rope_freq_scale             =*/ 0.0f,
        /*.yarn_ext_factor             =*/ -1.0f,
        /*.yarn_attn_factor            =*/ 1.0f,
        /*.yarn_beta_fast              =*/ 32.0f,
        /*.yarn_beta_slow              =*/ 1.0f,
        /*.yarn_orig_ctx               =*/ 0,
        /*.cb_eval                     =*/ nullptr,
        /*.cb_eval_user_data           =*/ nullptr,
        /*.type_k                      =*/ GGML_TYPE_F16,
        /*.type_v                      =*/ GGML_TYPE_F16,
        /*.mul_mat_q                   =*/ true,
        /*.logits_all                  =*/ false,
        /*.embedding                   =*/ false,
        /*.offload_kqv                 =*/ true,
        /*.do_pooling                  =*/ true,
    };

    return result;
}
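// Typical call sequence for the public API in this file (a minimal sketch; error handling
// and the prompt/token plumbing are omitted, and the model path is a placeholder):
//
//     llama_backend_init();
//     llama_model_params   mparams = llama_model_default_params();
//     llama_context_params cparams = llama_context_default_params();
//     llama_model   * model = llama_load_model_from_file("model.gguf", mparams);
//     llama_context * lctx  = llama_new_context_with_model(model, cparams);
//     // ... tokenize, llama_decode(), llama_get_logits_ith(), sample ...
//     llama_free(lctx);
//     llama_free_model(model);
//     llama_backend_free();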
struct llama_model_quantize_params llama_model_quantize_default_params() {
    struct llama_model_quantize_params result = {
        /*.nthread                     =*/ 0,
        /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
        /*.allow_requantize            =*/ false,
        /*.quantize_output_tensor      =*/ true,
        /*.only_copy                   =*/ false,
        /*.pure                        =*/ false,
        /*.imatrix                     =*/ nullptr,
    };

    return result;
}

size_t llama_max_devices(void) {
#if defined(GGML_USE_METAL)
    return 1;
#elif defined(GGML_USE_CUBLAS)
    return GGML_CUDA_MAX_DEVICES;
#elif defined(GGML_USE_SYCL)
    return GGML_SYCL_MAX_DEVICES;
#elif defined(GGML_USE_VULKAN)
    return GGML_VK_MAX_DEVICES;
#else
    return 1;
#endif
}

bool llama_supports_mmap(void) {
    return llama_mmap::SUPPORTED;
}

bool llama_supports_mlock(void) {
    return llama_mlock::SUPPORTED;
}

bool llama_supports_gpu_offload(void) {
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
    defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE)
    // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
    return true;
#else
    return false;
#endif
}

// deprecated:
bool llama_mmap_supported(void) {
    return llama_supports_mmap();
}

bool llama_mlock_supported(void) {
    return llama_supports_mlock();
}

void llama_backend_init(void) {
    ggml_time_init();

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

#ifdef GGML_USE_MPI
    ggml_mpi_backend_init();
#endif
}

void llama_numa_init(enum ggml_numa_strategy numa) {
    if (numa != GGML_NUMA_STRATEGY_DISABLED) {
        ggml_numa_init(numa);
    }
}

void llama_backend_free(void) {
#ifdef GGML_USE_MPI
    ggml_mpi_backend_free();
#endif
    ggml_quantize_free();
}

int64_t llama_time_us(void) {
    return ggml_time_us();
}

struct llama_model * llama_load_model_from_file(
        const char * path_model,
        struct llama_model_params params) {
    ggml_time_init();

    llama_model * model = new llama_model;

    unsigned cur_percentage = 0;
    if (params.progress_callback == NULL) {
        params.progress_callback_user_data = &cur_percentage;
        params.progress_callback = [](float progress, void * ctx) {
            unsigned * cur_percentage_p = (unsigned *) ctx;
            unsigned percentage = (unsigned) (100 * progress);
            while (percentage > *cur_percentage_p) {
                *cur_percentage_p = percentage;
                LLAMA_LOG_INFO(".");
                if (percentage >= 100) {
                    LLAMA_LOG_INFO("\n");
                }
            }
            return true;
        };
    }

    int status = llama_model_load(path_model, *model, params);
    GGML_ASSERT(status <= 0);
    if (status < 0) {
        if (status == -1) {
            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
        } else if (status == -2) {
            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
        }
        delete model;
        return nullptr;
    }

    return model;
}

void llama_free_model(struct llama_model * model) {
    delete model;
}

struct llama_context * llama_new_context_with_model(
        struct llama_model * model,
        struct llama_context_params params) {
    if (!model) {
        return nullptr;
    }

    llama_context * ctx = new llama_context(*model);

    const auto & hparams = model->hparams;
    auto       & cparams = ctx->cparams;

    cparams.n_batch          = params.n_batch;
    cparams.n_threads        = params.n_threads;
    cparams.n_threads_batch  = params.n_threads_batch;
    cparams.yarn_ext_factor  = params.yarn_ext_factor;
    cparams.yarn_attn_factor = params.yarn_attn_factor;
    cparams.yarn_beta_fast   = params.yarn_beta_fast;
    cparams.yarn_beta_slow   = params.yarn_beta_slow;
    cparams.mul_mat_q        = params.mul_mat_q;
    cparams.offload_kqv      = params.offload_kqv;
    cparams.do_pooling       = params.do_pooling;
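    // a zero (or, for yarn_ext_factor, negative) value in the user-supplied params means
    // "unspecified": those fields fall back to the values the model was trained with, as
    // recorded in hparams - explicit param > model hparams > training default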
    cparams.n_ctx            = params.n_ctx           == 0    ? hparams.n_ctx_train           : params.n_ctx;
    cparams.rope_freq_base   = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
    cparams.rope_freq_scale  = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;

    cparams.n_yarn_orig_ctx  = params.yarn_orig_ctx    != 0 ? params.yarn_orig_ctx    :
                               hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
                                                              hparams.n_ctx_train;

    cparams.cb_eval           = params.cb_eval;
    cparams.cb_eval_user_data = params.cb_eval_user_data;

    auto rope_scaling_type = params.rope_scaling_type;
    if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) {
        rope_scaling_type = hparams.rope_scaling_type_train;
    }

    if (rope_scaling_type == LLAMA_ROPE_SCALING_NONE) {
        cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
    }

    if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
        cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 1.0f : 0.0f;
    }

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    LLAMA_LOG_INFO("%s: n_ctx      = %u\n",   __func__, cparams.n_ctx);
    LLAMA_LOG_INFO("%s: freq_base  = %.1f\n", __func__, cparams.rope_freq_base);
    LLAMA_LOG_INFO("%s: freq_scale = %g\n",   __func__, cparams.rope_freq_scale);

    ctx->rng        = std::mt19937(params.seed);
    ctx->logits_all = params.logits_all;

    const ggml_type type_k = params.type_k;
    const ggml_type type_v = params.type_v;

    GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0);
    GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0);

    if (!hparams.vocab_only) {
        // initialize backends
#ifdef GGML_USE_METAL
        if (model->n_gpu_layers > 0) {
            ctx->backend_metal = ggml_backend_metal_init();
            if (ctx->backend_metal == nullptr) {
                LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(ctx->backend_metal);
        }
#elif defined(GGML_USE_CUBLAS)
        if (model->n_gpu_layers > 0) {
            // with split_mode LLAMA_SPLIT_NONE or LLAMA_SPLIT_ROW, only the main GPU backend is used
            if (model->split_mode == LLAMA_SPLIT_NONE || model->split_mode == LLAMA_SPLIT_ROW) {
                ggml_backend_t backend = ggml_backend_cuda_init(model->main_gpu);
                if (backend == nullptr) {
                    LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, model->main_gpu);
                    llama_free(ctx);
                    return nullptr;
                }
                ctx->backends.push_back(backend);
            } else {
                // LLAMA_SPLIT_LAYER requires a backend for each GPU
                for (int device = 0; device < ggml_backend_cuda_get_device_count(); ++device) {
                    ggml_backend_t backend = ggml_backend_cuda_init(device);
                    if (backend == nullptr) {
                        LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, device);
                        llama_free(ctx);
                        return nullptr;
                    }
                    ctx->backends.push_back(backend);
                }
            }
        }
#elif defined(GGML_USE_VULKAN)
        if (model->n_gpu_layers > 0) {
            for (int device = 0; device < ggml_backend_vk_get_device_count(); ++device) {
                ggml_backend_t backend = ggml_backend_vk_init(device);
                if (backend == nullptr) {
                    LLAMA_LOG_ERROR("%s: failed to initialize Vulkan%d backend\n", __func__, device);
                    llama_free(ctx);
                    return nullptr;
                }
                ctx->backends.push_back(backend);
            }
        }
#elif defined(GGML_USE_SYCL)
        if (model->n_gpu_layers > 0) {
            ggml_backend_t backend = ggml_backend_sycl_init(model->main_gpu);
            if (backend == nullptr) {
                LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d backend\n", __func__, model->main_gpu);
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(backend);
        }
#elif defined(GGML_USE_KOMPUTE)
        if (model->n_gpu_layers > 0) {
            auto * backend = ggml_backend_kompute_init(model->main_gpu);
            if (backend == nullptr) {
                LLAMA_LOG_ERROR("%s: failed to initialize Kompute backend\n", __func__);
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(backend);
        }
#endif
        ctx->backend_cpu = ggml_backend_cpu_init();
        if (ctx->backend_cpu == nullptr) {
            LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__);
            llama_free(ctx);
            return nullptr;
        }
        ctx->backends.push_back(ctx->backend_cpu);

        if (!llama_kv_cache_init(ctx->kv_self, ctx->model, type_k, type_v,
                cparams.n_ctx, cparams.offload_kqv)) {
            LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
            llama_free(ctx);
            return nullptr;
        }

        {
            size_t memory_size_k = 0;
            size_t memory_size_v = 0;

            for (auto & k : ctx->kv_self.k_l) {
                memory_size_k += ggml_nbytes(k);
            }

            for (auto & v : ctx->kv_self.v_l) {
                memory_size_v += ggml_nbytes(v);
            }

            LLAMA_LOG_INFO("%s: KV self size  = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
                (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
                ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
                ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
        }
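        // note: the KV self size reported above grows roughly as
        //   n_layer * n_ctx * (n_embd_k_gqa + n_embd_v_gqa) * bytes_per_element(type_k / type_v)
        // so a quantized cache type or a smaller n_ctx reduces it proportionally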
        // resized during inference, reserve maximum
        ctx->logits.reserve(hparams.n_vocab*cparams.n_batch);

        if (params.embedding) {
            ctx->embedding.resize(hparams.n_embd);
        }

        // graph inputs
        {
            ggml_init_params init_params = {
                /* .mem_size   */ ggml_tensor_overhead()*8,
                /* .mem_buffer */ nullptr,
                /* .no_alloc   */ true,
            };
            ctx->ctx_input = ggml_init(init_params);

            ctx->inp_tokens  = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
            ctx->inp_embd    = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, hparams.n_embd, cparams.n_batch);
            ctx->inp_pos     = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
            ctx->inp_KQ_mask = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_ctx, cparams.n_batch);
            ctx->inp_KQ_pos  = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_ctx);
            ctx->inp_K_shift = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_ctx);
            ctx->inp_mean    = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_batch, cparams.n_batch);
            ctx->inp_cls     = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);

            ggml_set_name(ctx->inp_tokens,  "inp_tokens");
            ggml_set_name(ctx->inp_embd,    "inp_embd");
            ggml_set_name(ctx->inp_pos,     "inp_pos");
            ggml_set_name(ctx->inp_KQ_mask, "inp_KQ_mask");
            ggml_set_name(ctx->inp_KQ_pos,  "inp_KQ_pos");
            ggml_set_name(ctx->inp_K_shift, "inp_K_shift");
            ggml_set_name(ctx->inp_mean,    "inp_mean");
            ggml_set_name(ctx->inp_cls,     "inp_cls");

            ctx->buf_input = ggml_backend_alloc_ctx_tensors_from_buft(ctx->ctx_input, llama_default_buffer_type_cpu(true));

            LLAMA_LOG_INFO("%s: %10s input buffer size   = %8.2f MiB\n", __func__,
                    ggml_backend_buffer_name(ctx->buf_input),
                    ggml_backend_buffer_get_size(ctx->buf_input) / 1024.0 / 1024.0);
        }

        // scheduler and compute buffers
        {
            // buffer types used for the compute buffer of each backend
            std::vector<ggml_backend_buffer_type_t> backend_buft;
            for (auto * backend : ctx->backends) {
                if (ggml_backend_is_cpu(backend)) {
                    // use host buffers for the CPU backend compute buffer
                    backend_buft.push_back(llama_default_buffer_type_cpu(true));
                } else {
                    backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
                }
            }

            // buffer used to store the computation graph and the tensor meta data
            ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead());

            ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), LLAMA_MAX_NODES);

            // build worst-case graph
            int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch);
            int n_past = cparams.n_ctx - n_tokens;
            llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
            ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);

            // initialize scheduler with the worst-case graph
            if (!ggml_backend_sched_reserve(ctx->sched, gf)) {
                LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
                llama_free(ctx);
                return nullptr;
            }

            for (size_t i = 0; i < ctx->backends.size(); i++) {
                ggml_backend_t backend = ctx->backends[i];
                ggml_backend_buffer_type_t buft = backend_buft[i];
                size_t size = ggml_backend_sched_get_buffer_size(ctx->sched, backend);
                LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                        ggml_backend_buft_name(buft),
                        size / 1024.0 / 1024.0);
            }

            // note: the number of splits during measure is higher than during inference due to the kv shift
            int n_splits = ggml_backend_sched_get_n_splits(ctx->sched);
            LLAMA_LOG_INFO("%s: graph splits (measure): %d\n", __func__, n_splits);
        }
    }

#ifdef GGML_USE_MPI
    ctx->ctx_mpi = ggml_mpi_init();

    if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
        // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
        // TODO: needs fix after #3228
        GGML_ASSERT(false && "not implemented");
        //const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
        //while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
        llama_backend_free();
        exit(1);
    }
#endif

    return ctx;
}

void llama_free(struct llama_context * ctx) {
    delete ctx;
}

const llama_model * llama_get_model(const struct llama_context * ctx) {
    return &ctx->model;
}

uint32_t llama_n_ctx(const struct llama_context * ctx) {
    return ctx->cparams.n_ctx;
}

uint32_t llama_n_batch(const struct llama_context * ctx) {
    return ctx->cparams.n_batch;
}

enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
    return model->vocab.type;
}

int32_t llama_n_vocab(const struct llama_model * model) {
    return model->vocab.id_to_token.size();
}

int32_t llama_n_ctx_train(const struct llama_model * model) {
    return model->hparams.n_ctx_train;
}

int32_t llama_n_embd(const struct llama_model * model) {
    return model->hparams.n_embd;
}

float llama_rope_freq_scale_train(const struct llama_model * model) {
    return model->hparams.rope_freq_scale_train;
}

int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
    const auto & it = model->gguf_kv.find(key);
    if (it == model->gguf_kv.end()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}

int32_t llama_model_meta_count(const struct llama_model * model) {
    return (int)model->gguf_kv.size();
}

int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->first.c_str());
}

int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}

int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
    return snprintf(buf, buf_size, "%s %s %s",
            llama_model_arch_name(model->arch),
            llama_model_type_name(model->type),
            llama_model_ftype_name(model->ftype).c_str());
}

uint64_t llama_model_size(const struct llama_model * model) {
    uint64_t size = 0;
    for (const auto & it : model->tensors_by_name) {
        size += ggml_nbytes(it.second);
    }
    return size;
}

uint64_t llama_model_n_params(const struct llama_model * model) {
    uint64_t nparams = 0;
    for (const auto & it : model->tensors_by_name) {
        nparams += ggml_nelements(it.second);
    }
    return nparams;
}

struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
    auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(),
            [name](const std::pair<std::string, struct ggml_tensor *> & it) {
                return it.first == name;
            });
    if (it == model->tensors_by_name.end()) {
        return nullptr;
    }
    return it->second;
}

uint32_t llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        const llama_model_quantize_params * params) {
    try {
        llama_model_quantize_internal(fname_inp, fname_out, params);
        return 0;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
        return 1;
    }
}

int32_t llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, float scale, const char * path_base_model, int32_t n_threads) {
    try {
        return llama_apply_lora_from_file_internal(ctx->model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

int32_t llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, float scale, const char * path_base_model, int32_t n_threads) {
    try {
        return llama_apply_lora_from_file_internal(*model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_max_seq) {
    struct llama_kv_cache_view result = {
        /*.n_cells            = */ 0,
        /*.n_max_seq          = */ n_max_seq,
        /*.token_count        = */ 0,
        /*.used_cells         = */ llama_get_kv_cache_used_cells(ctx),
        /*.max_contiguous     = */ 0,
        /*.max_contiguous_idx = */ -1,
        /*.cells              = */ nullptr,
        /*.cells_sequences    = */ nullptr,
    };
    return result;
}
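// note: the view returned above has null cell buffers on purpose - they are allocated
// (and grown if the cache grows) by llama_kv_cache_view_update() and must be released
// with llama_kv_cache_view_free()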
void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
    if (view->cells != nullptr) {
        free(view->cells);
        view->cells = nullptr;
    }
    if (view->cells_sequences != nullptr) {
        free(view->cells_sequences);
        view->cells_sequences = nullptr;
    }
}

void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
    if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
        view->n_cells = int32_t(ctx->kv_self.size);
        void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
        view->cells = (struct llama_kv_cache_view_cell *)p;
        p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_max_seq * view->n_cells);
        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
        view->cells_sequences = (llama_seq_id *)p;
    }

    const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
    llama_kv_cache_view_cell * c_curr = view->cells;
    llama_seq_id * cs_curr = view->cells_sequences;
    int32_t used_cells = 0;
    int32_t token_count = 0;
    int32_t curr_contig_idx = -1;
    uint32_t max_contig = 0;
    int32_t max_contig_idx = -1;

    for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_max_seq) {
        const size_t curr_size = kv_cells[i].seq_id.size();
        token_count += curr_size;
        c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;

        if (curr_size > 0) {
            if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
                max_contig = i - curr_contig_idx;
                max_contig_idx = curr_contig_idx;
            }
            curr_contig_idx = -1;
        } else if (curr_contig_idx < 0) {
            curr_contig_idx = i;
        }

        int seq_idx = 0;
        for (const llama_seq_id it : kv_cells[i].seq_id) {
            if (seq_idx >= view->n_max_seq) {
                break;
            }
            cs_curr[seq_idx] = it;
            seq_idx++;
        }
        if (seq_idx != 0) {
            used_cells++;
        }
        for (; seq_idx < view->n_max_seq; seq_idx++) {
            cs_curr[seq_idx] = -1;
        }
    }
    if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
        max_contig_idx = curr_contig_idx;
        max_contig = kv_cells.size() - curr_contig_idx;
    }
    view->max_contiguous = max_contig;
    view->max_contiguous_idx = max_contig_idx;
    view->token_count = token_count;
    view->used_cells = used_cells;
    if (uint32_t(used_cells) != ctx->kv_self.used) {
        LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
            __func__, ctx->kv_self.used, used_cells);
    }
}

int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) {
    int result = 0;
    for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
        result += ctx->kv_self.cells[i].seq_id.size();
    }
    return result;
}

int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
    return ctx->kv_self.used;
}

void llama_kv_cache_clear(struct llama_context * ctx) {
    llama_kv_cache_clear(ctx->kv_self);
}

void llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
}

void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }
    llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
}

void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
    llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
}

void llama_kv_cache_seq_shift(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
    if (delta == 0) {
        return;
    }
    llama_kv_cache_seq_shift(ctx->kv_self, seq_id, p0, p1, delta);
}

void llama_kv_cache_seq_div(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    if (d == 1) {
        return;
    }
    llama_kv_cache_seq_div(ctx->kv_self, seq_id, p0, p1, d);
}

// Returns the *maximum* size of the state
size_t llama_get_state_size(const struct llama_context * ctx) {
    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size       = sizeof(size_t);
    const size_t s_rng            = LLAMA_MAX_RNG_STATE;
    const size_t s_logits_size    = sizeof(size_t);
    // assume worst case for logits although only currently set ones are serialized
    const size_t s_logits         = ctx->logits.capacity() * sizeof(float);
    const size_t s_embedding_size = sizeof(size_t);
    const size_t s_embedding      = ctx->embedding.size() * sizeof(float);
    const size_t s_kv_size        = sizeof(size_t);
    const size_t s_kv_ntok        = sizeof(int);
    const size_t s_kv             = ctx->kv_self.total_size();

    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_logits_size
        + s_logits
        + s_embedding_size
        + s_embedding
        + s_kv_size
        + s_kv_ntok
        + s_kv
    );

    return s_total;
}
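// The state blob produced/consumed by llama_copy_state_data()/llama_set_state_data() below
// is laid out in this order:
//   rng size (size_t) + rng string, logits count (size_t) + logits,
//   embedding count (size_t) + embeddings, kv buffer size / head / size / used,
//   the K and V tensors per layer, then per-cell (pos, seq_id count, seq_ids)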
// llama_context_data
struct llama_data_context {
    virtual void write(const void * src, size_t size) = 0;
    virtual size_t get_size_written() = 0;
    virtual ~llama_data_context() = default;
};

struct llama_data_buffer_context : llama_data_context {
    uint8_t * ptr;
    size_t size_written = 0;

    llama_data_buffer_context(uint8_t * p) : ptr(p) {}

    void write(const void * src, size_t size) override {
        memcpy(ptr, src, size);
        ptr += size;
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

struct llama_data_file_context : llama_data_context {
    llama_file * file;
    size_t size_written = 0;

    llama_data_file_context(llama_file * f) : file(f) {}

    void write(const void * src, size_t size) override {
        file->write_raw(src, size);
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};
/** copy state data into either a buffer or file depending on the passed in context
 *
 * file context:
 * llama_file file("/path", "wb");
 * llama_data_file_context data_ctx(&file);
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
 * buffer context:
 * std::vector<uint8_t> buf(max_size, 0);
 * llama_data_buffer_context data_ctx(buf.data());
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
*/
static void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
    // copy rng
    {
        std::ostringstream rng_ss;
        rng_ss << ctx->rng;

        const std::string & rng_str  = rng_ss.str();
        const size_t        rng_size = rng_str.size();

        GGML_ASSERT(rng_size <= LLAMA_MAX_RNG_STATE);

        data_ctx->write(&rng_size,      sizeof(rng_size));
        data_ctx->write(rng_str.data(), rng_size);
    }

    // copy logits
    {
        const size_t logits_size = ctx->logits.size();

        data_ctx->write(&logits_size, sizeof(logits_size));

        if (logits_size) {
            data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
        }
    }

    // copy embeddings
    {
        const size_t embedding_size = ctx->embedding.size();

        data_ctx->write(&embedding_size, sizeof(embedding_size));

        if (embedding_size) {
            data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
        }
    }

    // copy kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const auto & cparams = ctx->cparams;

        const auto n_layer      = hparams.n_layer;
        const auto n_embd_k_gqa = hparams.n_embd_k_gqa();
        const auto n_embd_v_gqa = hparams.n_embd_v_gqa();
        const auto n_ctx        = cparams.n_ctx;

        const size_t   kv_buf_size = kv_self.total_size();
        const uint32_t kv_head     = kv_self.head;
        const uint32_t kv_size     = kv_self.size;
        const uint32_t kv_used     = kv_self.used;

        data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
        data_ctx->write(&kv_head,     sizeof(kv_head));
        data_ctx->write(&kv_size,     sizeof(kv_size));
        data_ctx->write(&kv_used,     sizeof(kv_used));

        if (kv_buf_size) {
            const size_t elt_size = ggml_element_size(kv_self.k_l[0]);

            std::vector<uint8_t> tmp_buf;
            for (int il = 0; il < (int) n_layer; ++il) {
                tmp_buf.resize(elt_size*n_embd_k_gqa*kv_head);
                ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), 0, tmp_buf.size());
                data_ctx->write(tmp_buf.data(), tmp_buf.size());

                // v is not contiguous, copy row by row
                tmp_buf.resize(elt_size*kv_head);
                for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) {
                    ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), ir*elt_size*n_ctx, tmp_buf.size());
                    data_ctx->write(tmp_buf.data(), tmp_buf.size());
                }
            }
        }

        for (uint32_t i = 0; i < kv_size; ++i) {
            const auto & cell = kv_self.cells[i];

            const llama_pos pos         = cell.pos;
            const size_t    seq_id_size = cell.seq_id.size();

            data_ctx->write(&pos,         sizeof(pos));
            data_ctx->write(&seq_id_size, sizeof(seq_id_size));

            for (auto seq_id : cell.seq_id) {
                data_ctx->write(&seq_id, sizeof(seq_id));
            }
        }
    }
}

size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
    llama_data_buffer_context data_ctx(dst);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return data_ctx.get_size_written();
}

// Sets the state reading from the specified source address
size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
    uint8_t * inp = src;

    // set rng
    {
        size_t rng_size;
        memcpy(&rng_size, inp, sizeof(rng_size)); inp += sizeof(rng_size);

        GGML_ASSERT(rng_size <= LLAMA_MAX_RNG_STATE);

        std::string rng_str((char *)inp, rng_size); inp += rng_size;

        std::istringstream rng_ss(rng_str);
        rng_ss >> ctx->rng;

        GGML_ASSERT(!rng_ss.fail());
    }

    // set logits
    {
        size_t logits_size;

        memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);

        GGML_ASSERT(ctx->logits.capacity() >= logits_size);

        if (logits_size) {
            ctx->logits.resize(logits_size);

            memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
            inp += logits_size * sizeof(float);
        }
    }

    // set embeddings
    {
        size_t embedding_size;

        memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);

        GGML_ASSERT(ctx->embedding.capacity() == embedding_size);

        if (embedding_size) {
            memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
            inp += embedding_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const auto & cparams = ctx->cparams;

        const int n_layer      = hparams.n_layer;
        const int n_embd_k_gqa = hparams.n_embd_k_gqa();
        const int n_embd_v_gqa = hparams.n_embd_v_gqa();
        const int n_ctx        = cparams.n_ctx;

        size_t   kv_buf_size;
        uint32_t kv_head;
        uint32_t kv_size;
        uint32_t kv_used;

        memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
        memcpy(&kv_head,     inp, sizeof(kv_head));     inp += sizeof(kv_head);
        memcpy(&kv_size,     inp, sizeof(kv_size));     inp += sizeof(kv_size);
        memcpy(&kv_used,     inp, sizeof(kv_used));     inp += sizeof(kv_used);

        if (kv_buf_size) {
            GGML_ASSERT(kv_self.total_size() == kv_buf_size);

            const size_t elt_size = ggml_element_size(kv_self.k_l[0]);

            for (int il = 0; il < (int) n_layer; ++il) {
                size_t k_size = elt_size*n_embd_k_gqa*kv_head;
                ggml_backend_tensor_set(kv_self.k_l[il], inp, 0, k_size);
                inp += k_size;

                // v is not contiguous, copy row by row
                size_t v_row_size = elt_size*kv_head;
                for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) {
                    ggml_backend_tensor_set(kv_self.v_l[il], inp, ir*elt_size*n_ctx, v_row_size);
                    inp += v_row_size;
                }
            }
        }

        ctx->kv_self.head = kv_head;
        ctx->kv_self.size = kv_size;
        ctx->kv_self.used = kv_used;

        ctx->kv_self.cells.resize(kv_size);

        for (uint32_t i = 0; i < kv_size; ++i) {
            llama_pos pos;
            size_t    seq_id_size;

            memcpy(&pos,         inp, sizeof(pos));         inp += sizeof(pos);
            memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);

            ctx->kv_self.cells[i].pos = pos;

            llama_seq_id seq_id;

            for (size_t j = 0; j < seq_id_size; ++j) {
                memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
                ctx->kv_self.cells[i].seq_id.insert(seq_id);
            }
        }
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_get_state_size(ctx);

    GGML_ASSERT(nread <= max_size);

    return nread;
}

static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_get_state_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_set_state_data(ctx, state_data.data());
    }
    return true;
}

bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
        return false;
    }
}

bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return true;
}
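// note: the session file layout written above is: LLAMA_SESSION_MAGIC, LLAMA_SESSION_VERSION,
// the raw llama_hparams struct, the token count, the prompt tokens, and finally the state
// blob streamed via llama_copy_state_data_internal() - the loader validates each field in
// the same order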
int llama_eval(
        struct llama_context * ctx,
                 llama_token * tokens,
                     int32_t   n_tokens,
                     int32_t   n_past) {
    llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);

    const int ret = llama_decode_internal(*ctx, llama_batch_get_one(tokens, n_tokens, n_past, 0));
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

int llama_eval_embd(
        struct llama_context * ctx,
                       float * embd,
                     int32_t   n_tokens,
                     int32_t   n_past) {
    llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);

    llama_batch batch = { n_tokens, nullptr, embd, nullptr, nullptr, nullptr, nullptr, n_past, 1, 0, };

    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch) {
    ctx->cparams.n_threads       = n_threads;
    ctx->cparams.n_threads_batch = n_threads_batch;
}

struct llama_batch llama_batch_get_one(
        llama_token * tokens,
            int32_t   n_tokens,
          llama_pos   pos_0,
       llama_seq_id   seq_id) {
    return {
        /*n_tokens   =*/ n_tokens,
        /*tokens     =*/ tokens,
        /*embd       =*/ nullptr,
        /*pos        =*/ nullptr,
        /*n_seq_id   =*/ nullptr,
        /*seq_id     =*/ nullptr,
        /*logits     =*/ nullptr,
        /*all_pos_0  =*/ pos_0,
        /*all_pos_1  =*/ 1,
        /*all_seq_id =*/ seq_id,
    };
}

struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
    llama_batch batch = { 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, 0, 0, };

    if (embd) {
        batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
    } else {
        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
    }

    batch.pos      = (llama_pos *)     malloc(sizeof(llama_pos)      * n_tokens_alloc);
    batch.n_seq_id = (int32_t *)       malloc(sizeof(int32_t)        * n_tokens_alloc);
    batch.seq_id   = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
    for (int i = 0; i < n_tokens_alloc; ++i) {
        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
    }
    batch.seq_id[n_tokens_alloc] = nullptr;

    batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens_alloc);

    return batch;
}
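// note: llama_batch_init() only allocates; the caller is expected to fill token/embd, pos,
// n_seq_id, seq_id and logits for the first batch.n_tokens entries before llama_decode(),
// and to release everything with llama_batch_free() - the extra null entry at
// seq_id[n_tokens_alloc] is what lets llama_batch_free() stop without knowing the size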
  10289. void llama_batch_free(struct llama_batch batch) {
  10290. if (batch.token) free(batch.token);
  10291. if (batch.embd) free(batch.embd);
  10292. if (batch.pos) free(batch.pos);
  10293. if (batch.n_seq_id) free(batch.n_seq_id);
  10294. if (batch.seq_id) {
  10295. for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
  10296. free(batch.seq_id[i]);
  10297. }
  10298. free(batch.seq_id);
  10299. }
  10300. if (batch.logits) free(batch.logits);
  10301. }
  10302. int32_t llama_decode(
  10303. struct llama_context * ctx,
  10304. struct llama_batch batch) {
  10305. const int ret = llama_decode_internal(*ctx, batch);
  10306. if (ret < 0) {
  10307. LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
  10308. }
  10309. return ret;
  10310. }
  10311. float * llama_get_logits(struct llama_context * ctx) {
  10312. return ctx->logits.data();
  10313. }
  10314. float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
  10315. assert(ctx->logits_valid.at(i));
  10316. return ctx->logits.data() + i*ctx->model.hparams.n_vocab;
  10317. }
  10318. float * llama_get_embeddings(struct llama_context * ctx) {
  10319. return ctx->embedding.data();
  10320. }
  10321. float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
  10322. return ctx->embedding.data() + i*ctx->model.hparams.n_embd;
  10323. }
const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].text.c_str();
}

float llama_token_get_score(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].score;
}

llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].type;
}

llama_token llama_token_bos(const struct llama_model * model) {
    return model->vocab.special_bos_id;
}

llama_token llama_token_eos(const struct llama_model * model) {
    return model->vocab.special_eos_id;
}

llama_token llama_token_nl(const struct llama_model * model) {
    return model->vocab.linefeed_id;
}

// -1 = unknown, 0 = don't add, 1 = add
int32_t llama_add_bos_token(const struct llama_model * model) {
    return model->vocab.special_add_bos;
}

// -1 = unknown, 0 = don't add, 1 = add
int32_t llama_add_eos_token(const struct llama_model * model) {
    return model->vocab.special_add_eos;
}

llama_token llama_token_prefix(const struct llama_model * model) {
    return model->vocab.special_prefix_id;
}

llama_token llama_token_middle(const struct llama_model * model) {
    return model->vocab.special_middle_id;
}

llama_token llama_token_suffix(const struct llama_model * model) {
    return model->vocab.special_suffix_id;
}

llama_token llama_token_eot(const struct llama_model * model) {
    return model->vocab.special_eot_id;
}
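// Typical use of the special-token getters in a generation loop (sketch):
//
//     if (new_token == llama_token_eos(model)) {
//         break; // the model signalled the end of generation
//     }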
int32_t llama_tokenize(
    const struct llama_model * model,
                  const char * text,
                     int32_t   text_len,
                 llama_token * tokens,
                     int32_t   n_max_tokens,
                        bool   add_bos,
                        bool   special) {
    auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_bos, special);

    if (n_max_tokens < (int) res.size()) {
        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}
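// Hedged usage sketch: a negative return value is the required token count, so
// callers can tokenize in two passes. `text` is assumed to be NUL-terminated.
//
//     std::vector<llama_token> toks(8);
//     int32_t n = llama_tokenize(model, text, strlen(text), toks.data(), toks.size(),
//                                /*add_bos*/ true, /*special*/ false);
//     if (n < 0) {
//         toks.resize(-n);
//         n = llama_tokenize(model, text, strlen(text), toks.data(), toks.size(), true, false);
//     }
//     toks.resize(n);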
static std::string llama_decode_text(const std::string & text) {
    std::string decoded_text;
    auto unicode_sequences = codepoints_from_utf8(text);
    for (auto & unicode_sequence : unicode_sequences) {
        decoded_text += unicode_to_bytes_bpe(codepoint_to_utf8(unicode_sequence));
    }

    return decoded_text;
}
// does not write null-terminator to buf
int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length) {
    if (0 <= token && token < llama_n_vocab(model)) {
        switch (llama_vocab_get_type(model->vocab)) {
        case LLAMA_VOCAB_TYPE_WPM:
        case LLAMA_VOCAB_TYPE_SPM: {
            // NOTE: we accept all unsupported token types,
            // suppressing them like CONTROL tokens.
            if (llama_is_normal_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                llama_unescape_whitespace(result);
                if (length < (int) result.length()) {
                    return -(int) result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_user_defined_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                if (length < (int) result.length()) {
                    // cast before negating: result.length() is unsigned
                    return -(int) result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
                if (length < 3) {
                    return -3;
                }
                memcpy(buf, "\xe2\x96\x85", 3);
                return 3;
            } else if (llama_is_control_token(model->vocab, token)) {
                ;
            } else if (llama_is_byte_token(model->vocab, token)) {
                if (length < 1) {
                    return -1;
                }
                buf[0] = llama_token_to_byte(model->vocab, token);
                return 1;
            }
            break;
        }
        case LLAMA_VOCAB_TYPE_BPE: {
            // NOTE: we accept all unsupported token types,
            // suppressing them like CONTROL tokens.
            if (llama_is_normal_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                result = llama_decode_text(result);
                if (length < (int) result.length()) {
                    return -(int) result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_user_defined_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                if (length < (int) result.length()) {
                    // cast before negating: result.length() is unsigned
                    return -(int) result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_control_token(model->vocab, token)) {
                ;
            }
            break;
        }
        default:
            GGML_ASSERT(false);
        }
    }
    return 0;
}
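// Sketch of a convenience wrapper (hypothetical helper, not in this file) that
// retries with the reported size when the buffer is too small; the result is
// built from the byte count because the function never NUL-terminates:
//
//     static std::string token_to_piece(const llama_model * model, llama_token token) {
//         std::vector<char> buf(8);
//         int32_t n = llama_token_to_piece(model, token, buf.data(), buf.size());
//         if (n < 0) {
//             buf.resize(-n);
//             n = llama_token_to_piece(model, token, buf.data(), buf.size());
//         }
//         return std::string(buf.data(), n);
//     }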
// trim whitespace from the beginning and end of a string
static std::string trim(const std::string & str) {
    size_t start = 0;
    size_t end = str.size();
    // cast to unsigned char: passing a negative char to isspace() is undefined behavior
    while (start < end && isspace(static_cast<unsigned char>(str[start]))) {
        start += 1;
    }
    while (end > start && isspace(static_cast<unsigned char>(str[end - 1]))) {
        end -= 1;
    }
    return str.substr(start, end - start);
}
// Simple version of "llama_apply_chat_template" that only works with strings
// This function uses heuristic checks to detect commonly used templates. It is not a jinja parser.
static int32_t llama_chat_apply_template_internal(
    const std::string & tmpl,
    const std::vector<const llama_chat_message *> & chat,
    std::string & dest, bool add_ass) {
    // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
    std::stringstream ss;
    if (tmpl.find("<|im_start|>") != std::string::npos) {
        // chatml template
        for (auto message : chat) {
            ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
        }
        if (add_ass) {
            ss << "<|im_start|>assistant\n";
        }
    } else if (tmpl.find("[INST]") != std::string::npos) {
        // llama2 template and its variants
        // [variant] support system message
        bool support_system_message = tmpl.find("<<SYS>>") != std::string::npos;
        // [variant] space before + after response
        bool space_around_response = tmpl.find("' ' + eos_token") != std::string::npos;
        // [variant] add BOS inside history
        bool add_bos_inside_history = tmpl.find("bos_token + '[INST]") != std::string::npos;
        // [variant] trim spaces from the input message
        bool strip_message = tmpl.find("content.strip()") != std::string::npos;
        // construct the prompt
        bool is_inside_turn = true; // skip BOS at the beginning
        ss << "[INST] ";
        for (auto message : chat) {
            std::string content = strip_message ? trim(message->content) : message->content;
            std::string role(message->role);
            if (!is_inside_turn) {
                is_inside_turn = true;
                ss << (add_bos_inside_history ? "<s>[INST] " : "[INST] ");
            }
            if (role == "system") {
                if (support_system_message) {
                    ss << "<<SYS>>\n" << content << "\n<</SYS>>\n\n";
                } else {
                    // if the model does not support system message, we still include it in the first message, but without <<SYS>>
                    ss << content << "\n";
                }
            } else if (role == "user") {
                ss << content << " [/INST]";
            } else {
                ss << (space_around_response ? " " : "") << content << (space_around_response ? " " : "") << "</s>";
                is_inside_turn = false;
            }
        }
        // llama2 templates seem to not care about "add_generation_prompt"
    } else if (tmpl.find("<|user|>") != std::string::npos) {
        // zephyr template
        for (auto message : chat) {
            ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
        }
        if (add_ass) {
            ss << "<|assistant|>\n";
        }
    } else {
        // template not supported
        return -1;
    }
    dest = ss.str();
    return dest.size();
}
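// For reference, the chatml branch above formats the two-message chat
//     { "user", "Hello" }, { "assistant", "Hi" }
// with add_ass == true as:
//     <|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi<|im_end|>\n<|im_start|>assistant\n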
LLAMA_API int32_t llama_chat_apply_template(
                const struct llama_model * model,
                              const char * tmpl,
         const struct llama_chat_message * chat,
                                    size_t n_msg,
                                      bool add_ass,
                                      char * buf,
                                   int32_t length) {
    std::string curr_tmpl(tmpl == nullptr ? "" : tmpl);
    if (tmpl == nullptr) {
        GGML_ASSERT(model != nullptr);
        // load template from model
        std::vector<char> model_template(2048, 0); // longest known template is about 1200 bytes
        std::string template_key = "tokenizer.chat_template";
        // pass the full buffer size here (curr_tmpl is empty at this point, so its
        // size would be 0 and nothing would ever be written into model_template)
        int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
        if (res < 0) {
            // worst case: there is no information about template, we will use chatml by default
            curr_tmpl = "<|im_start|>"; // see llama_chat_apply_template_internal
        } else {
            // the buffer is zero-initialized, so data() is NUL-terminated; constructing
            // from data() alone avoids keeping the trailing NUL padding in the string
            curr_tmpl = std::string(model_template.data());
        }
    }

    // format the chat to string
    std::vector<const llama_chat_message *> chat_vec;
    chat_vec.resize(n_msg);
    for (size_t i = 0; i < n_msg; i++) {
        chat_vec[i] = &chat[i];
    }

    std::string formatted_chat;
    int32_t res = llama_chat_apply_template_internal(curr_tmpl, chat_vec, formatted_chat, add_ass);
    if (res < 0) {
        return res;
    }
    // NOTE: if res > length the output is truncated and, per strncpy semantics,
    // not NUL-terminated; callers should compare res against length
    strncpy(buf, formatted_chat.c_str(), length);
    return res;
}
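// Hedged usage sketch: the return value is the length the formatted chat needs,
// which can exceed `length`, so callers can grow the buffer and retry:
//
//     llama_chat_message chat[] = { { "user", "Hello" } };
//     std::vector<char> buf(256);
//     int32_t res = llama_chat_apply_template(model, nullptr, chat, 1, true, buf.data(), buf.size());
//     if (res > (int32_t) buf.size()) {
//         buf.resize(res);
//         res = llama_chat_apply_template(model, nullptr, chat, 1, true, buf.data(), buf.size());
//     }
//     std::string prompt(buf.data(), res);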
struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}
void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    LLAMA_LOG_INFO("\n");
    LLAMA_LOG_INFO("%s:        load time = %10.2f ms\n", __func__, timings.t_load_ms);
    LLAMA_LOG_INFO("%s:      sample time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    LLAMA_LOG_INFO("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    LLAMA_LOG_INFO("%s:       total time = %10.2f ms / %5d tokens\n", __func__, (timings.t_end_ms - timings.t_start_ms), (timings.n_p_eval + timings.n_eval));
}
void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us  = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}
const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX_VNNI = "    + std::to_string(ggml_cpu_has_avx_vnni())    + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
    s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";

    return s.c_str();
}
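// Note: the pointer returned above refers to a function-local static
// std::string, so the call is not thread-safe and the buffer is rebuilt on
// every invocation. Typical use:
//
//     fprintf(stderr, "system info: %s\n", llama_print_system_info());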
void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
    fprintf(stream, "\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "# Timings #\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "\n");

    fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
            1.0e-3 * ctx->t_eval_us / ctx->n_eval);
    fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
            1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
    fprintf(stream, "mst_sample: %.2f # ms / token during sampling\n",
            1.0e-3 * ctx->t_sample_us / ctx->n_sample);
    fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
    fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
    fprintf(stream, "n_sample: %d # number of sampled tokens\n", ctx->n_sample);
    fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
    fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
    fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
    fprintf(stream, "t_sample_us: %" PRId64 " # total microseconds spent sampling\n", ctx->t_sample_us);
    fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
            1.0e6 * ctx->n_eval / ctx->t_eval_us);
    fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
            1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
    fprintf(stream, "ts_sample: %.2f # tokens / second during sampling\n",
            1.0e6 * ctx->n_sample / ctx->t_sample_us);
}
// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
) {
    return ctx->model.tensors_by_name;
}
void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
#ifdef GGML_USE_METAL
    ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#endif
}
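// Hedged example of a custom callback for llama_log_set: silence informational
// messages and forward the rest to stderr (GGML_LOG_LEVEL_INFO is assumed to be
// declared in ggml.h; adjust to the enum values actually available).
//
//     static void my_log_cb(ggml_log_level level, const char * text, void * user_data) {
//         (void) user_data;
//         if (level == GGML_LOG_LEVEL_INFO) {
//             return;
//         }
//         fputs(text, stderr);
//     }
//
//     // llama_log_set(my_log_cb, /*user_data*/ nullptr);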
static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        // the message did not fit into the stack buffer: vsnprintf returned the
        // required length, so format again into a heap buffer using the copied va_list
        char * buffer2 = new char[len + 1];
        vsnprintf(buffer2, len + 1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}
static void llama_log_internal(ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}