llama.cpp

#define LLAMA_API_INTERNAL
//#define LLAMA_GGML_BACKEND_CUDA_TEST // for testing only - enables ggml-cuda through ggml-backend, disables partial offloading

#include "llama.h"
#include "unicode.h"

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

#ifdef GGML_USE_CUBLAS
# include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
# include "ggml-opencl.h"
#endif

#ifdef GGML_USE_METAL
# include "ggml-metal.h"
#endif
#ifdef GGML_USE_MPI
# include "ggml-mpi.h"
#endif

#ifndef QK_K
# ifdef GGML_QKK_64
#  define QK_K 64
# else
#  define QK_K 256
# endif
#endif

#ifdef __has_include
#if __has_include(<unistd.h>)
#include <unistd.h>
#if defined(_POSIX_MAPPED_FILES)
#include <sys/mman.h>
#include <fcntl.h>
#endif
#if defined(_POSIX_MEMLOCK_RANGE)
#include <sys/resource.h>
#endif
#endif
#endif

#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <io.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <forward_list>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <queue>
#include <random>
#include <regex>
#include <set>
#include <sstream>
#include <thread>
#include <type_traits>
#include <unordered_map>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif

#define LLAMA_MAX_NODES   8192
#define LLAMA_MAX_EXPERTS 8

//
// logging
//

LLAMA_ATTRIBUTE_FORMAT(2, 3)
static void llama_log_internal        (ggml_log_level level, const char * format, ...);
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)

//
// helpers
//
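// Note: the lookup below maps the high 4 bits of a UTF-8 lead byte to the
// sequence length (0xxx -> 1 byte, 110x -> 2, 1110 -> 3, 1111 -> 4);
// continuation bytes (10xx) also map to 1, so malformed input still advances.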
static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}

static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    std::string result;
    for (size_t pos = 0; ; pos += search.length()) {
        auto new_pos = s.find(search, pos);
        if (new_pos == std::string::npos) {
            result += s.substr(pos, s.size() - pos);
            break;
        }
        result += s.substr(pos, new_pos - pos) + replace;
        pos = new_pos;
    }
    s = std::move(result);
}

static bool is_float_close(float a, float b, float abs_tol) {
    // Check for non-negative tolerance
    if (abs_tol < 0.0) {
        throw std::invalid_argument("Tolerance must be non-negative");
    }

    // Exact equality check
    if (a == b) {
        return true;
    }

    // Check for infinities
    if (std::isinf(a) || std::isinf(b)) {
        return false;
    }

    // Regular comparison using the provided absolute tolerance
    return std::fabs(b - a) <= abs_tol;
}

#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}
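// Note: the first vsnprintf call below only measures the required length and
// consumes `ap`; the va_copy'd `ap2` is what actually formats into the buffer
// on the second pass.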
LLAMA_ATTRIBUTE_FORMAT(1, 2)
static std::string format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}
//
// gguf constants (sync with gguf.py)
//

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_PERSIMMON,
    LLM_ARCH_REFACT,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_PHI2,
    LLM_ARCH_PLAMO,
    LLM_ARCH_UNKNOWN,
};

static std::map<llm_arch, std::string> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA,     "llama"     },
    { LLM_ARCH_FALCON,    "falcon"    },
    { LLM_ARCH_GPT2,      "gpt2"      },
    { LLM_ARCH_GPTJ,      "gptj"      },
    { LLM_ARCH_GPTNEOX,   "gptneox"   },
    { LLM_ARCH_MPT,       "mpt"       },
    { LLM_ARCH_BAICHUAN,  "baichuan"  },
    { LLM_ARCH_STARCODER, "starcoder" },
    { LLM_ARCH_PERSIMMON, "persimmon" },
    { LLM_ARCH_REFACT,    "refact"    },
    { LLM_ARCH_BLOOM,     "bloom"     },
    { LLM_ARCH_STABLELM,  "stablelm"  },
    { LLM_ARCH_QWEN,      "qwen"      },
    { LLM_ARCH_PHI2,      "phi2"      },
    { LLM_ARCH_PLAMO,     "plamo"     },
};

enum llm_kv {
    LLM_KV_GENERAL_ARCHITECTURE,
    LLM_KV_GENERAL_QUANTIZATION_VERSION,
    LLM_KV_GENERAL_ALIGNMENT,
    LLM_KV_GENERAL_NAME,
    LLM_KV_GENERAL_AUTHOR,
    LLM_KV_GENERAL_URL,
    LLM_KV_GENERAL_DESCRIPTION,
    LLM_KV_GENERAL_LICENSE,
    LLM_KV_GENERAL_SOURCE_URL,
    LLM_KV_GENERAL_SOURCE_HF_REPO,

    LLM_KV_CONTEXT_LENGTH,
    LLM_KV_EMBEDDING_LENGTH,
    LLM_KV_BLOCK_COUNT,
    LLM_KV_FEED_FORWARD_LENGTH,
    LLM_KV_USE_PARALLEL_RESIDUAL,
    LLM_KV_TENSOR_DATA_LAYOUT,
    LLM_KV_EXPERT_COUNT,
    LLM_KV_EXPERT_USED_COUNT,

    LLM_KV_ATTENTION_HEAD_COUNT,
    LLM_KV_ATTENTION_HEAD_COUNT_KV,
    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
    LLM_KV_ATTENTION_CLAMP_KQV,
    LLM_KV_ATTENTION_KEY_LENGTH,
    LLM_KV_ATTENTION_VALUE_LENGTH,
    LLM_KV_ATTENTION_LAYERNORM_EPS,
    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,

    LLM_KV_ROPE_DIMENSION_COUNT,
    LLM_KV_ROPE_FREQ_BASE,
    LLM_KV_ROPE_SCALE_LINEAR,
    LLM_KV_ROPE_SCALING_TYPE,
    LLM_KV_ROPE_SCALING_FACTOR,
    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
    LLM_KV_ROPE_SCALING_FINETUNED,

    LLM_KV_TOKENIZER_MODEL,
    LLM_KV_TOKENIZER_LIST,
    LLM_KV_TOKENIZER_TOKEN_TYPE,
    LLM_KV_TOKENIZER_SCORES,
    LLM_KV_TOKENIZER_MERGES,
    LLM_KV_TOKENIZER_BOS_ID,
    LLM_KV_TOKENIZER_EOS_ID,
    LLM_KV_TOKENIZER_UNK_ID,
    LLM_KV_TOKENIZER_SEP_ID,
    LLM_KV_TOKENIZER_PAD_ID,
    LLM_KV_TOKENIZER_ADD_BOS,
    LLM_KV_TOKENIZER_ADD_EOS,
    LLM_KV_TOKENIZER_HF_JSON,
    LLM_KV_TOKENIZER_RWKV,
};

static std::map<llm_kv, std::string> LLM_KV_NAMES = {
    { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
    { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
    { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
    { LLM_KV_GENERAL_NAME, "general.name" },
    { LLM_KV_GENERAL_AUTHOR, "general.author" },
    { LLM_KV_GENERAL_URL, "general.url" },
    { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
    { LLM_KV_GENERAL_LICENSE, "general.license" },
    { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
    { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },

    { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
    { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
    { LLM_KV_BLOCK_COUNT, "%s.block_count" },
    { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
    { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
    { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
    { LLM_KV_EXPERT_COUNT, "%s.expert_count" },
    { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" },

    { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
    { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
    { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
    { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" },
    { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" },
    { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },

    { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
    { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
    { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
    { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
    { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
    { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },

    { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
    { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
    { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
    { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
    { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
    { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
    { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
    { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
    { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
    { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
    { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
    { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
    { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
    { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
};

struct LLM_KV {
    LLM_KV(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    std::string operator()(llm_kv kv) const {
        return ::format(LLM_KV_NAMES[kv].c_str(), LLM_ARCH_NAMES[arch].c_str());
    }
};
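// Usage example: LLM_KV(LLM_ARCH_LLAMA)(LLM_KV_CONTEXT_LENGTH) substitutes the
// architecture name into the "%s" placeholder and yields "llama.context_length";
// keys without a placeholder (e.g. "general.architecture") are returned unchanged.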
enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_TOKEN_EMBD_NORM,
    LLM_TENSOR_POS_EMBD,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_ROPE_FREQS,
    LLM_TENSOR_ATTN_Q,
    LLM_TENSOR_ATTN_K,
    LLM_TENSOR_ATTN_V,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_NORM_2,
    LLM_TENSOR_ATTN_ROT_EMBD,
    LLM_TENSOR_FFN_GATE_INP,
    LLM_TENSOR_FFN_NORM,
    LLM_TENSOR_FFN_GATE,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
    LLM_TENSOR_FFN_ACT,
    LLM_TENSOR_FFN_DOWN_EXP,
    LLM_TENSOR_FFN_GATE_EXP,
    LLM_TENSOR_FFN_UP_EXP,
    LLM_TENSOR_ATTN_Q_NORM,
    LLM_TENSOR_ATTN_K_NORM,
};
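// Per-architecture tensor name templates: "%d" is filled with the block (layer)
// index, and the *_EXP entries take a second "%d" for the expert index in
// mixture-of-experts models.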
static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
    {
        LLM_ARCH_LLAMA,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
        },
    },
    {
        LLM_ARCH_BAICHUAN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_FALCON,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_GPT2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_GPTJ,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
    {
        LLM_ARCH_GPTNEOX,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PERSIMMON,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
        },
    },
    {
        LLM_ARCH_MPT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  447. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  448. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  449. { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
  450. },
  451. },
  452. {
  453. LLM_ARCH_STARCODER,
  454. {
  455. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  456. { LLM_TENSOR_POS_EMBD, "position_embd" },
  457. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  458. { LLM_TENSOR_OUTPUT, "output" },
  459. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  460. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  461. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  462. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  463. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  464. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  465. },
  466. },
  467. {
  468. LLM_ARCH_REFACT,
  469. {
  470. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  471. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  472. { LLM_TENSOR_OUTPUT, "output" },
  473. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  474. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  475. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  476. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  477. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  478. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  479. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  480. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  481. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  482. },
  483. },
  484. {
  485. LLM_ARCH_BLOOM,
  486. {
  487. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  488. { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
  489. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  490. { LLM_TENSOR_OUTPUT, "output" },
  491. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  492. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  493. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  494. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  495. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  496. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  497. },
  498. },
  499. {
  500. LLM_ARCH_STABLELM,
  501. {
  502. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  503. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  504. { LLM_TENSOR_OUTPUT, "output" },
  505. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  506. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  507. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  508. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  509. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  510. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  511. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  512. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  513. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  514. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  515. },
  516. },
  517. {
  518. LLM_ARCH_QWEN,
  519. {
  520. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  521. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  522. { LLM_TENSOR_OUTPUT, "output" },
  523. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  524. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  525. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  526. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  527. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  528. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  529. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  530. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  531. },
  532. },
  533. {
  534. LLM_ARCH_PHI2,
  535. {
  536. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  537. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  538. { LLM_TENSOR_OUTPUT, "output" },
  539. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  540. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  541. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  542. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  543. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  544. },
  545. },
  546. {
  547. LLM_ARCH_PLAMO,
  548. {
  549. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  550. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  551. { LLM_TENSOR_OUTPUT, "output" },
  552. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  553. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  554. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  555. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  556. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  557. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  558. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  559. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  560. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  561. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  562. },
  563. },
  564. {
  565. LLM_ARCH_UNKNOWN,
  566. {
  567. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  568. },
  569. },
  570. };
  571. static llm_arch llm_arch_from_string(const std::string & name) {
  572. for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
  573. if (kv.second == name) {
  574. return kv.first;
  575. }
  576. }
  577. return LLM_ARCH_UNKNOWN;
  578. }
  579. // helper to handle gguf constants
  580. // usage:
  581. //
  582. // const auto tn = LLM_TN(LLM_ARCH_LLAMA);
  583. //
  584. // std::string name = tn(LLM_TENSOR_OUTPUT); -> "output"
  585. // std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias"
  586. // std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
  587. //
  588. struct LLM_TN {
  589. LLM_TN(llm_arch arch) : arch(arch) {}
  590. llm_arch arch;
  591. std::string operator()(llm_tensor tensor) const {
  592. return LLM_TENSOR_NAMES[arch].at(tensor);
  593. }
  594. std::string operator()(llm_tensor tensor, const std::string & suffix) const {
  595. return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
  596. }
  597. std::string operator()(llm_tensor tensor, int bid) const {
  598. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
  599. }
  600. std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
  601. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
  602. }
  603. std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
  604. return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid, xid) + "." + suffix;
  605. }
  606. };
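// Additional usage sketch (illustrative): the (tensor, suffix, bid, xid) overload handles the
// per-expert MoE tensors whose names carry two indices, e.g. for LLM_ARCH_LLAMA where
// LLM_TENSOR_FFN_GATE_EXP is "blk.%d.ffn_gate.%d":
//
//   std::string name = tn(LLM_TENSOR_FFN_GATE_EXP, "weight", 3, 2); // -> "blk.3.ffn_gate.2.weight"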
  607. //
  608. // gguf helpers
  609. //
  610. static std::map<int8_t, std::string> LLAMA_ROPE_SCALING_TYPES = {
  611. { LLAMA_ROPE_SCALING_NONE, "none" },
  612. { LLAMA_ROPE_SCALING_LINEAR, "linear" },
  613. { LLAMA_ROPE_SCALING_YARN, "yarn" },
  614. };
  615. static int8_t llama_rope_scaling_type_from_string(const std::string & name) {
  616. for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
  617. if (kv.second == name) {
  618. return kv.first;
  619. }
  620. }
  621. return LLAMA_ROPE_SCALING_UNSPECIFIED;
  622. }
  623. static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
  624. switch (type) {
  625. case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
  626. case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
  627. case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
  628. case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
  629. case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
  630. case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
  631. case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
  632. case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
  633. case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
  634. case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
  635. case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
  636. default: return format("unknown type %d", type);
  637. }
  638. }
  639. static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
  640. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  641. switch (type) {
  642. case GGUF_TYPE_STRING:
  643. return gguf_get_val_str(ctx_gguf, i);
  644. case GGUF_TYPE_ARRAY:
  645. {
  646. const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
  647. int arr_n = gguf_get_arr_n(ctx_gguf, i);
  648. const void * data = gguf_get_arr_data(ctx_gguf, i);
  649. std::stringstream ss;
  650. ss << "[";
  651. for (int j = 0; j < arr_n; j++) {
  652. if (arr_type == GGUF_TYPE_STRING) {
  653. std::string val = gguf_get_arr_str(ctx_gguf, i, j);
  654. // escape quotes
  655. replace_all(val, "\\", "\\\\");
  656. replace_all(val, "\"", "\\\"");
  657. ss << '"' << val << '"';
  658. } else if (arr_type == GGUF_TYPE_ARRAY) {
  659. ss << "???";
  660. } else {
  661. ss << gguf_data_to_str(arr_type, data, j);
  662. }
  663. if (j < arr_n - 1) {
  664. ss << ", ";
  665. }
  666. }
  667. ss << "]";
  668. return ss.str();
  669. }
  670. default:
  671. return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
  672. }
  673. }
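// Output sketch (illustrative): scalar values go through gguf_data_to_str (e.g. a float
// rope freq_base of 10000 prints as "10000.000000" via std::to_string), while arrays are
// rendered as a bracketed, comma-separated list with string elements quoted and escaped:
//
//   tokenizer.ggml.tokens -> ["<unk>", "<s>", "</s>", ...]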
  674. //
  675. // ggml helpers
  676. //
  677. static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
  678. struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
  679. if (plan.work_size > 0) {
  680. buf.resize(plan.work_size);
  681. plan.work_data = buf.data();
  682. }
  683. ggml_graph_compute(graph, &plan);
  684. }
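// Usage sketch (illustrative, with gf being some already-built ggml_cgraph *): the caller
// owns the work buffer so it can be reused across graph evaluations and only grows when a
// plan requires more scratch memory:
//
//   std::vector<uint8_t> work_buf;
//   ggml_graph_compute_helper(work_buf, gf, /*n_threads =*/ 4);
//   ggml_graph_compute_helper(work_buf, gf, 4); // reuses the previous allocation if it is large enough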
  685. //
  686. // llama helpers
  687. //
  688. #if defined(_WIN32)
  689. static std::string llama_format_win_err(DWORD err) {
  690. LPSTR buf;
  691. size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
  692. NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
  693. if (!size) {
  694. return "FormatMessageA failed";
  695. }
  696. std::string ret(buf, size);
  697. LocalFree(buf);
  698. return ret;
  699. }
  700. #endif
  701. template <typename T>
  702. struct no_init {
  703. T value;
  704. no_init() { /* do nothing */ }
  705. };
  706. struct llama_file {
  707. // use FILE * so we don't have to re-open the file to mmap
  708. FILE * fp;
  709. size_t size;
  710. llama_file(const char * fname, const char * mode) {
  711. fp = std::fopen(fname, mode);
  712. if (fp == NULL) {
  713. throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
  714. }
  715. seek(0, SEEK_END);
  716. size = tell();
  717. seek(0, SEEK_SET);
  718. }
  719. size_t tell() const {
  720. #ifdef _WIN32
  721. __int64 ret = _ftelli64(fp);
  722. #else
  723. long ret = std::ftell(fp);
  724. #endif
  725. GGML_ASSERT(ret != -1); // this really shouldn't fail
  726. return (size_t) ret;
  727. }
  728. void seek(size_t offset, int whence) const {
  729. #ifdef _WIN32
  730. int ret = _fseeki64(fp, (__int64) offset, whence);
  731. #else
  732. int ret = std::fseek(fp, (long) offset, whence);
  733. #endif
734. GGML_ASSERT(ret == 0); // same as above: seeking within an already-open file really shouldn't fail
734. GGML_ASSERT(ret == 0); // same as above: seeking within an already-open file really shouldn't fail
  735. }
  736. void read_raw(void * ptr, size_t len) const {
  737. if (len == 0) {
  738. return;
  739. }
  740. errno = 0;
  741. std::size_t ret = std::fread(ptr, len, 1, fp);
  742. if (ferror(fp)) {
  743. throw std::runtime_error(format("read error: %s", strerror(errno)));
  744. }
  745. if (ret != 1) {
  746. throw std::runtime_error("unexpectedly reached end of file");
  747. }
  748. }
  749. uint32_t read_u32() const {
  750. uint32_t ret;
  751. read_raw(&ret, sizeof(ret));
  752. return ret;
  753. }
  754. void write_raw(const void * ptr, size_t len) const {
  755. if (len == 0) {
  756. return;
  757. }
  758. errno = 0;
  759. size_t ret = std::fwrite(ptr, len, 1, fp);
  760. if (ret != 1) {
  761. throw std::runtime_error(format("write error: %s", strerror(errno)));
  762. }
  763. }
  764. void write_u32(std::uint32_t val) const {
  765. write_raw(&val, sizeof(val));
  766. }
  767. ~llama_file() {
  768. if (fp) {
  769. std::fclose(fp);
  770. }
  771. }
  772. };
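// Usage sketch (illustrative, hypothetical path): reads are strict -- a short read throws
// instead of returning partial data, and the destructor closes the handle:
//
//   llama_file f("/path/to/model.gguf", "rb"); // throws std::runtime_error if the open fails
//   const uint32_t magic = f.read_u32();       // throws on EOF or on an I/O error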
  773. struct llama_mmap {
  774. void * addr;
  775. size_t size;
  776. llama_mmap(const llama_mmap &) = delete;
  777. #ifdef _POSIX_MAPPED_FILES
  778. static constexpr bool SUPPORTED = true;
  779. // list of mapped fragments (first_offset, last_offset)
  780. std::vector<std::pair<size_t, size_t>> mapped_fragments;
  781. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
  782. size = file->size;
  783. int fd = fileno(file->fp);
  784. int flags = MAP_SHARED;
  785. // prefetch/readahead impairs performance on NUMA systems
  786. if (numa) { prefetch = 0; }
  787. #ifdef __linux__
  788. // advise the kernel to read the file sequentially (increases readahead)
  789. if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
  790. LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
  791. strerror(errno));
  792. }
  793. if (prefetch) { flags |= MAP_POPULATE; }
  794. #endif
  795. addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
  796. if (addr == MAP_FAILED) { // NOLINT
  797. throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
  798. }
  799. if (prefetch > 0) {
  800. // advise the kernel to preload the mapped memory
  801. if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
  802. LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
  803. strerror(errno));
  804. }
  805. }
  806. if (numa) {
  807. // advise the kernel not to use readahead
  808. // (because the next page might not belong on the same node)
  809. if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
  810. LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
  811. strerror(errno));
  812. }
  813. }
  814. // initialize list of mapped_fragments
  815. mapped_fragments.emplace_back(0, file->size);
  816. }
  817. static void align_range(size_t * first, size_t * last, size_t page_size) {
  818. // align first to the next page
  819. size_t offset_in_page = *first & (page_size - 1);
  820. size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
  821. *first += offset_to_page;
  822. // align last to the previous page
  823. *last = *last & ~(page_size - 1);
  824. if (*last <= *first) {
  825. *last = *first;
  826. }
  827. }
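// Worked example (illustrative, page_size = 4096): align_range(&first, &last, 4096) with
// first = 5000 and last = 20000 yields first = 8192 (rounded up to the next page boundary)
// and last = 16384 (rounded down), so only pages that lie entirely inside the range are
// unmapped; the bit tricks assume page_size is a power of two.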
  828. // partially unmap the file in the range [first, last)
  829. void unmap_fragment(size_t first, size_t last) {
  830. // note: this function must not be called multiple times with overlapping ranges
  831. // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
  832. int page_size = sysconf(_SC_PAGESIZE);
  833. align_range(&first, &last, page_size);
  834. size_t len = last - first;
  835. if (len == 0) {
  836. return;
  837. }
  838. GGML_ASSERT(first % page_size == 0);
  839. GGML_ASSERT(last % page_size == 0);
  840. GGML_ASSERT(last > first);
  841. void * next_page_start = (uint8_t *) addr + first;
  842. // unmap the range
  843. if (munmap(next_page_start, len)) {
  844. LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
  845. }
  846. // update the list of mapped fragments to avoid unmapping the same range again in the destructor
  847. std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
  848. for (const auto & frag : mapped_fragments) {
  849. if (frag.first < first && frag.second > last) {
  850. // the range is in the middle of the fragment, split it
  851. new_mapped_fragments.emplace_back(frag.first, first);
  852. new_mapped_fragments.emplace_back(last, frag.second);
  853. } else if (frag.first < first && frag.second > first) {
  854. // the range starts in the middle of the fragment
  855. new_mapped_fragments.emplace_back(frag.first, first);
  856. } else if (frag.first < last && frag.second > last) {
  857. // the range ends in the middle of the fragment
  858. new_mapped_fragments.emplace_back(last, frag.second);
  859. } else if (frag.first >= first && frag.second <= last) {
  860. // the range covers the entire fragment
  861. } else {
  862. // the range is outside the fragment
  863. new_mapped_fragments.push_back(frag);
  864. }
  865. }
  866. mapped_fragments = std::move(new_mapped_fragments);
  867. }
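// Bookkeeping sketch (illustrative): starting from the single fragment [0, size), a call
// unmap_fragment(first, last) with 0 < first < last < size (after page alignment) leaves
// [0, first) and [last, size); the destructor then munmaps only these surviving fragments
// and never touches the already-released middle range.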
  868. ~llama_mmap() {
  869. for (const auto & frag : mapped_fragments) {
  870. if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
  871. LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
  872. }
  873. }
  874. }
  875. #elif defined(_WIN32)
  876. static constexpr bool SUPPORTED = true;
  877. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
  878. GGML_UNUSED(numa);
  879. size = file->size;
  880. HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
  881. HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
  882. if (hMapping == NULL) {
  883. DWORD error = GetLastError();
  884. throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
  885. }
  886. addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
  887. DWORD error = GetLastError();
  888. CloseHandle(hMapping);
  889. if (addr == NULL) {
  890. throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
  891. }
  892. if (prefetch > 0) {
  893. // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
  894. BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
  895. HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
  896. // may fail on pre-Windows 8 systems
  897. pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
  898. if (pPrefetchVirtualMemory) {
  899. // advise the kernel to preload the mapped memory
  900. WIN32_MEMORY_RANGE_ENTRY range;
  901. range.VirtualAddress = addr;
  902. range.NumberOfBytes = (SIZE_T) std::min(size, prefetch);
  903. if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
  904. LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
  905. llama_format_win_err(GetLastError()).c_str());
  906. }
  907. }
  908. }
  909. }
  910. void unmap_fragment(size_t first, size_t last) {
  911. // not supported
  912. GGML_UNUSED(first);
  913. GGML_UNUSED(last);
  914. }
  915. ~llama_mmap() {
  916. if (!UnmapViewOfFile(addr)) {
  917. LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
  918. llama_format_win_err(GetLastError()).c_str());
  919. }
  920. }
  921. #else
  922. static constexpr bool SUPPORTED = false;
  923. llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) {
  924. GGML_UNUSED(file);
  925. GGML_UNUSED(prefetch);
  926. GGML_UNUSED(numa);
  927. throw std::runtime_error("mmap not supported");
  928. }
  929. void unmap_fragment(size_t first, size_t last) {
  930. GGML_UNUSED(first);
  931. GGML_UNUSED(last);
  932. throw std::runtime_error("mmap not supported");
  933. }
  934. #endif
  935. };
  936. // Represents some region of memory being locked using mlock or VirtualLock;
  937. // will automatically unlock on destruction.
  938. struct llama_mlock {
  939. void * addr = NULL;
  940. size_t size = 0;
  941. bool failed_already = false;
  942. llama_mlock() {}
  943. llama_mlock(const llama_mlock &) = delete;
  944. ~llama_mlock() {
  945. if (size) {
  946. raw_unlock(addr, size);
  947. }
  948. }
  949. void init(void * ptr) {
  950. GGML_ASSERT(addr == NULL && size == 0); // NOLINT
  951. addr = ptr;
  952. }
  953. void grow_to(size_t target_size) {
  954. GGML_ASSERT(addr);
  955. if (failed_already) {
  956. return;
  957. }
  958. size_t granularity = lock_granularity();
  959. target_size = (target_size + granularity - 1) & ~(granularity - 1);
  960. if (target_size > size) {
  961. if (raw_lock((uint8_t *) addr + size, target_size - size)) {
  962. size = target_size;
  963. } else {
  964. failed_already = true;
  965. }
  966. }
  967. }
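// Worked example (illustrative): with a 4096-byte lock granularity, grow_to(10000) rounds
// the request up to 12288 via (10000 + 4095) & ~4095 and then locks only the new
// [size, 12288) tail, so each page is locked at most once across repeated calls.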
  968. #ifdef _POSIX_MEMLOCK_RANGE
  969. static constexpr bool SUPPORTED = true;
  970. static size_t lock_granularity() {
  971. return (size_t) sysconf(_SC_PAGESIZE);
  972. }
  973. #ifdef __APPLE__
  974. #define MLOCK_SUGGESTION \
  975. "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
  976. "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
  977. #else
  978. #define MLOCK_SUGGESTION \
  979. "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
  980. #endif
  981. bool raw_lock(const void * addr, size_t size) const {
  982. if (!mlock(addr, size)) {
  983. return true;
  984. }
  985. char* errmsg = std::strerror(errno);
  986. bool suggest = (errno == ENOMEM);
  987. // Check if the resource limit is fine after all
  988. struct rlimit lock_limit;
  989. if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
  990. suggest = false;
  991. }
  992. if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
  993. suggest = false;
  994. }
  995. fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
  996. size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
  997. return false;
  998. }
  999. #undef MLOCK_SUGGESTION
  1000. static void raw_unlock(void * addr, size_t size) {
  1001. if (munlock(addr, size)) {
  1002. fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
  1003. }
  1004. }
  1005. #elif defined(_WIN32)
  1006. static constexpr bool SUPPORTED = true;
  1007. static size_t lock_granularity() {
  1008. SYSTEM_INFO si;
  1009. GetSystemInfo(&si);
  1010. return (size_t) si.dwPageSize;
  1011. }
  1012. bool raw_lock(void * ptr, size_t len) const {
  1013. for (int tries = 1; ; tries++) {
  1014. if (VirtualLock(ptr, len)) {
  1015. return true;
  1016. }
  1017. if (tries == 2) {
  1018. fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
  1019. len, size, llama_format_win_err(GetLastError()).c_str());
  1020. return false;
  1021. }
  1022. // It failed but this was only the first try; increase the working
  1023. // set size and try again.
  1024. SIZE_T min_ws_size, max_ws_size;
  1025. if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
  1026. fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
  1027. llama_format_win_err(GetLastError()).c_str());
  1028. return false;
  1029. }
  1030. // Per MSDN: "The maximum number of pages that a process can lock
  1031. // is equal to the number of pages in its minimum working set minus
  1032. // a small overhead."
  1033. // Hopefully a megabyte is enough overhead:
  1034. size_t increment = len + 1048576;
  1035. // The minimum must be <= the maximum, so we need to increase both:
  1036. min_ws_size += increment;
  1037. max_ws_size += increment;
  1038. if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
  1039. fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
  1040. llama_format_win_err(GetLastError()).c_str());
  1041. return false;
  1042. }
  1043. }
  1044. }
  1045. static void raw_unlock(void * ptr, size_t len) {
  1046. if (!VirtualUnlock(ptr, len)) {
  1047. fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
  1048. llama_format_win_err(GetLastError()).c_str());
  1049. }
  1050. }
  1051. #else
  1052. static constexpr bool SUPPORTED = false;
  1053. static size_t lock_granularity() {
  1054. return (size_t) 65536;
  1055. }
  1056. bool raw_lock(const void * addr, size_t len) const {
  1057. fprintf(stderr, "warning: mlock not supported on this system\n");
  1058. return false;
  1059. }
  1060. static void raw_unlock(const void * addr, size_t len) {}
  1061. #endif
  1062. };
  1063. typedef void (*offload_func_t)(struct ggml_tensor * tensor);
  1064. static void ggml_offload_nop(struct ggml_tensor * tensor) {
  1065. (void) tensor;
  1066. }
  1067. static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
  1068. std::vector<char> result(8, 0);
  1069. const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
  1070. if (n_tokens < 0) {
  1071. result.resize(-n_tokens);
  1072. int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
  1073. GGML_ASSERT(check == -n_tokens);
  1074. }
  1075. else {
  1076. result.resize(n_tokens);
  1077. }
  1078. return std::string(result.data(), result.size());
  1079. }
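// Behaviour sketch (illustrative): the C API returns the negated required length when the
// caller's buffer is too small, so the wrapper above retries exactly once:
//
//   8-byte buffer, piece needs 12 bytes -> first call returns -12, resize to 12,
//   second call returns 12 and the piece is copied in full.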
  1080. static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) {
  1081. ggml_backend_buffer_type_t buft = nullptr;
  1082. #ifdef GGML_USE_METAL
  1083. if (n_gpu_layers > 0) {
  1084. buft = ggml_backend_metal_buffer_type();
  1085. }
  1086. #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  1087. if (n_gpu_layers > 0) {
  1088. buft = ggml_backend_cuda_buffer_type(0);
  1089. }
  1090. #elif defined(GGML_USE_CUBLAS)
  1091. buft = ggml_backend_cuda_host_buffer_type();
  1092. #elif defined(GGML_USE_CPU_HBM)
  1093. buft = ggml_backend_cpu_hbm_buffer_type();
  1094. #endif
  1095. if (buft == nullptr) {
  1096. buft = ggml_backend_cpu_buffer_type();
  1097. }
  1098. return buft;
  1099. GGML_UNUSED(n_gpu_layers);
  1100. }
  1101. //
  1102. // globals
  1103. //
  1104. struct llama_state {
  1105. llama_state() {
  1106. #ifdef GGML_USE_METAL
  1107. ggml_metal_log_set_callback(log_callback, log_callback_user_data);
  1108. #endif
  1109. }
  1110. // We save the log callback globally
  1111. ggml_log_callback log_callback = llama_log_callback_default;
  1112. void * log_callback_user_data = nullptr;
  1113. };
  1114. static llama_state g_state;
  1115. // available llama models
  1116. enum e_model {
  1117. MODEL_UNKNOWN,
  1118. MODEL_1B,
  1119. MODEL_3B,
  1120. MODEL_7B,
  1121. MODEL_8B,
  1122. MODEL_13B,
  1123. MODEL_15B,
  1124. MODEL_30B,
  1125. MODEL_34B,
  1126. MODEL_40B,
  1127. MODEL_65B,
  1128. MODEL_70B,
  1129. MODEL_SMALL,
  1130. MODEL_MEDIUM,
  1131. MODEL_LARGE,
  1132. MODEL_XL,
  1133. };
  1134. static const size_t kiB = 1024;
  1135. static const size_t MiB = 1024*kiB;
  1136. static const size_t GiB = 1024*MiB;
  1137. struct llama_hparams {
  1138. bool vocab_only;
  1139. uint32_t n_vocab;
  1140. uint32_t n_ctx_train; // context size the model was trained on
  1141. uint32_t n_embd;
  1142. uint32_t n_head;
  1143. uint32_t n_head_kv;
  1144. uint32_t n_layer;
  1145. uint32_t n_rot;
  1146. uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
  1147. uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
  1148. uint32_t n_ff;
  1149. uint32_t n_expert = 0;
  1150. uint32_t n_expert_used = 0;
  1151. float f_norm_eps;
  1152. float f_norm_rms_eps;
  1153. float rope_freq_base_train;
  1154. float rope_freq_scale_train;
  1155. uint32_t n_yarn_orig_ctx;
  1156. int8_t rope_scaling_type_train : 3;
  1157. bool rope_finetuned : 1;
  1158. float f_clamp_kqv;
  1159. float f_max_alibi_bias;
  1160. bool operator!=(const llama_hparams & other) const {
  1161. if (this->vocab_only != other.vocab_only) return true;
  1162. if (this->n_vocab != other.n_vocab) return true;
  1163. if (this->n_ctx_train != other.n_ctx_train) return true;
  1164. if (this->n_embd != other.n_embd) return true;
  1165. if (this->n_head != other.n_head) return true;
  1166. if (this->n_head_kv != other.n_head_kv) return true;
  1167. if (this->n_layer != other.n_layer) return true;
  1168. if (this->n_rot != other.n_rot) return true;
  1169. if (this->n_embd_head_k != other.n_embd_head_k) return true;
  1170. if (this->n_embd_head_v != other.n_embd_head_v) return true;
  1171. if (this->n_ff != other.n_ff) return true;
  1172. if (this->n_expert != other.n_expert) return true;
  1173. if (this->n_expert_used != other.n_expert_used) return true;
  1174. if (this->rope_finetuned != other.rope_finetuned) return true;
  1175. if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;
  1176. const float EPSILON = 1e-9f;
  1177. if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true;
  1178. if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true;
  1179. if (!is_float_close(this->rope_freq_base_train, other.rope_freq_base_train, EPSILON)) return true;
  1180. if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;
  1181. return false;
  1182. }
  1183. uint32_t n_gqa() const {
  1184. return n_head/n_head_kv;
  1185. }
  1186. uint32_t n_embd_k_gqa() const { // dimension of key embeddings across all k-v heads
  1187. return n_embd_head_k * n_head_kv;
  1188. }
  1189. uint32_t n_embd_v_gqa() const { // dimension of value embeddings across all k-v heads
  1190. return n_embd_head_v * n_head_kv;
  1191. }
  1192. };
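// Example (illustrative): a typical 70B LLaMA-2 style configuration has n_head = 64,
// n_head_kv = 8 and n_embd_head_k = n_embd_head_v = 128, giving n_gqa() = 8 and
// n_embd_k_gqa() = n_embd_v_gqa() = 1024, i.e. the KV cache stores 8 KV heads per layer
// instead of 64.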
  1193. struct llama_cparams {
  1194. uint32_t n_ctx; // context size used during inference
  1195. uint32_t n_batch;
  1196. uint32_t n_threads; // number of threads to use for generation
  1197. uint32_t n_threads_batch; // number of threads to use for batch processing
  1198. float rope_freq_base;
  1199. float rope_freq_scale;
  1200. uint32_t n_yarn_orig_ctx;
  1201. // These hyperparameters are not exposed in GGUF, because all
  1202. // existing YaRN models use the same values for them.
  1203. float yarn_ext_factor;
  1204. float yarn_attn_factor;
  1205. float yarn_beta_fast;
  1206. float yarn_beta_slow;
  1207. bool mul_mat_q;
  1208. bool offload_kqv;
  1209. };
  1210. struct llama_layer {
  1211. // normalization
  1212. struct ggml_tensor * attn_norm;
  1213. struct ggml_tensor * attn_norm_b;
  1214. struct ggml_tensor * attn_norm_2;
  1215. struct ggml_tensor * attn_norm_2_b;
  1216. struct ggml_tensor * attn_q_norm;
  1217. struct ggml_tensor * attn_q_norm_b;
  1218. struct ggml_tensor * attn_k_norm;
  1219. struct ggml_tensor * attn_k_norm_b;
  1220. // attention
  1221. struct ggml_tensor * wq;
  1222. struct ggml_tensor * wk;
  1223. struct ggml_tensor * wv;
  1224. struct ggml_tensor * wo;
  1225. struct ggml_tensor * wqkv;
  1226. // attention bias
  1227. struct ggml_tensor * bq;
  1228. struct ggml_tensor * bk;
  1229. struct ggml_tensor * bv;
  1230. struct ggml_tensor * bo;
  1231. struct ggml_tensor * bqkv;
  1232. // normalization
  1233. struct ggml_tensor * ffn_norm;
  1234. struct ggml_tensor * ffn_norm_b;
  1235. // ff
  1236. struct ggml_tensor * ffn_gate; // w1
  1237. struct ggml_tensor * ffn_down; // w2
  1238. struct ggml_tensor * ffn_up; // w3
  1239. // ff MoE
  1240. struct ggml_tensor * ffn_gate_inp;
  1241. struct ggml_tensor * ffn_gate_exp[LLAMA_MAX_EXPERTS];
  1242. struct ggml_tensor * ffn_down_exp[LLAMA_MAX_EXPERTS];
  1243. struct ggml_tensor * ffn_up_exp [LLAMA_MAX_EXPERTS];
  1244. // ff bias
  1245. struct ggml_tensor * ffn_down_b; // b2
  1246. struct ggml_tensor * ffn_up_b; // b3
  1247. struct ggml_tensor * ffn_act;
  1248. };
  1249. struct llama_kv_cell {
  1250. llama_pos pos = -1;
  1251. llama_pos delta = 0;
  1252. std::set<llama_seq_id> seq_id;
  1253. bool has_seq_id(const llama_seq_id & id) const {
  1254. return seq_id.find(id) != seq_id.end();
  1255. }
  1256. };
  1257. // ring-buffer of cached KV data
  1258. struct llama_kv_cache {
  1259. bool has_shift = false;
  1260. // Note: The value of head isn't only used to optimize searching
  1261. // for a free KV slot. llama_decode_internal also uses it, so it
  1262. // cannot be freely changed after a slot has been allocated.
  1263. uint32_t head = 0;
  1264. uint32_t size = 0;
  1265. uint32_t used = 0; // used cells (i.e. at least one seq_id)
  1266. // computed before each graph build
  1267. uint32_t n = 0;
  1268. std::vector<llama_kv_cell> cells;
  1269. std::vector<struct ggml_tensor *> k_l; // per layer
  1270. std::vector<struct ggml_tensor *> v_l;
  1271. struct ggml_context * ctx = NULL;
  1272. ggml_backend_buffer_t buf = NULL;
  1273. ~llama_kv_cache() {
  1274. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  1275. if (ggml_cublas_loaded()) {
  1276. for (size_t i = 0; i < k_l.size(); ++i) {
  1277. ggml_cuda_free_data(k_l[i]);
  1278. ggml_cuda_free_data(v_l[i]);
  1279. }
  1280. }
  1281. #endif
  1282. if (ctx) {
  1283. ggml_free(ctx);
  1284. }
  1285. ggml_backend_buffer_free(buf);
  1286. }
  1287. };
  1288. struct llama_vocab {
  1289. using id = int32_t;
  1290. using token = std::string;
  1291. using ttype = llama_token_type;
  1292. struct token_data {
  1293. token text;
  1294. float score;
  1295. ttype type;
  1296. };
  1297. enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;
  1298. std::unordered_map<token, id> token_to_id;
  1299. std::vector<token_data> id_to_token;
  1300. std::unordered_map<token, id> special_tokens_cache;
  1301. std::map<std::pair<std::string, std::string>, int> bpe_ranks;
  1302. // default LLaMA special tokens
  1303. id special_bos_id = 1;
  1304. id special_eos_id = 2;
  1305. id special_unk_id = 0;
  1306. id special_sep_id = -1;
  1307. id special_pad_id = -1;
  1308. int special_add_bos = -1; // -1 unknown, 1 add, 0 don't add.
  1309. int special_add_eos = -1; // -1 unknown, 1 add, 0 don't add.
  1310. id linefeed_id = 13;
  1311. id special_prefix_id = 32007;
  1312. id special_middle_id = 32009;
  1313. id special_suffix_id = 32008;
  1314. id special_eot_id = 32010;
  1315. int find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
  1316. GGML_ASSERT(token_left.find(' ') == std::string::npos);
  1317. GGML_ASSERT(token_left.find('\n') == std::string::npos);
  1318. GGML_ASSERT(token_right.find(' ') == std::string::npos);
  1319. GGML_ASSERT(token_right.find('\n') == std::string::npos);
  1320. auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
  1321. if (it == bpe_ranks.end()) {
  1322. return -1;
  1323. }
  1324. return it->second;
  1325. }
  1326. };
  1327. struct llama_model {
  1328. e_model type = MODEL_UNKNOWN;
  1329. llm_arch arch = LLM_ARCH_UNKNOWN;
  1330. llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
  1331. std::string name = "n/a";
  1332. llama_hparams hparams = {};
  1333. llama_vocab vocab;
  1334. struct ggml_tensor * tok_embd;
  1335. struct ggml_tensor * pos_embd;
  1336. struct ggml_tensor * tok_norm;
  1337. struct ggml_tensor * tok_norm_b;
  1338. struct ggml_tensor * output_norm;
  1339. struct ggml_tensor * output_norm_b;
  1340. struct ggml_tensor * output;
  1341. struct ggml_tensor * output_b;
  1342. std::vector<llama_layer> layers;
  1343. int n_gpu_layers;
  1344. // gguf metadata
  1345. std::unordered_map<std::string, std::string> gguf_kv;
  1346. // context
  1347. struct ggml_context * ctx = NULL;
  1348. // the model memory buffer
  1349. ggml_backend_buffer_t buf = NULL;
  1350. // model memory mapped file
  1351. std::unique_ptr<llama_mmap> mapping;
  1352. // objects representing data potentially being locked in memory
  1353. llama_mlock mlock_buf;
  1354. llama_mlock mlock_mmap;
  1355. // for quantize-stats only
  1356. std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
  1357. int64_t t_load_us = 0;
  1358. int64_t t_start_us = 0;
  1359. ~llama_model() {
  1360. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  1361. if (ggml_cublas_loaded()) {
  1362. for (size_t i = 0; i < tensors_by_name.size(); ++i) {
  1363. ggml_cuda_free_data(tensors_by_name[i].second);
  1364. }
  1365. ggml_cuda_free_scratch();
  1366. }
  1367. #endif
  1368. #if defined(GGML_USE_CLBLAST)
  1369. for (size_t i = 0; i < tensors_by_name.size(); ++i) {
  1370. ggml_cl_free_data(tensors_by_name[i].second);
  1371. }
  1372. #endif
  1373. if (ctx) {
  1374. ggml_free(ctx);
  1375. }
  1376. ggml_backend_buffer_free(buf);
  1377. }
  1378. };
  1379. struct llama_context {
  1380. llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
  1381. ~llama_context() {
  1382. ggml_allocr_free(alloc);
  1383. ggml_backend_buffer_free(buf_alloc);
  1384. ggml_backend_free(backend);
  1385. }
  1386. llama_cparams cparams;
  1387. ggml_backend_t backend = nullptr;
  1388. const llama_model & model;
  1389. // key + value cache for the self attention
  1390. struct llama_kv_cache kv_self;
  1391. std::mt19937 rng;
  1392. bool has_evaluated_once = false;
  1393. int64_t t_start_us;
  1394. int64_t t_load_us;
  1395. int64_t t_sample_us = 0;
  1396. int64_t t_p_eval_us = 0;
  1397. int64_t t_eval_us = 0;
  1398. int32_t n_sample = 0; // number of tokens sampled
  1399. int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
  1400. int32_t n_eval = 0; // number of eval calls
  1401. // decode output (2-dimensional array: [n_tokens][n_vocab])
  1402. std::vector<float> logits;
  1403. #ifndef NDEBUG
  1404. // guard against access to unset logits
  1405. std::vector<bool> logits_valid;
  1406. #endif
  1407. bool logits_all = false;
  1408. // input embedding (1-dimensional array: [n_embd])
  1409. std::vector<float> embedding;
  1410. // memory buffers used to evaluate the model
  1411. std::vector<uint8_t> buf_compute_meta;
  1412. ggml_backend_buffer_t buf_alloc = NULL;
  1413. ggml_allocr * alloc = NULL;
  1414. // temporary buffer for copying data to/from the backend
  1415. std::vector<no_init<uint8_t>> buf_copy;
  1416. #ifdef GGML_USE_MPI
  1417. ggml_mpi_context * ctx_mpi = NULL;
  1418. #endif
  1419. };
  1420. //
  1421. // kv cache helpers
  1422. //
  1423. static bool llama_kv_cache_init(
  1424. const struct llama_hparams & hparams,
  1425. struct llama_kv_cache & cache,
  1426. ggml_type ktype,
  1427. ggml_type vtype,
  1428. uint32_t n_ctx,
  1429. int n_gpu_layers,
  1430. bool offload) {
  1431. const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  1432. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  1433. const uint32_t n_layer = hparams.n_layer;
  1434. cache.has_shift = false;
  1435. cache.head = 0;
  1436. cache.size = n_ctx;
  1437. cache.used = 0;
  1438. cache.cells.clear();
  1439. cache.cells.resize(n_ctx);
  1440. struct ggml_init_params params;
  1441. params.mem_size = 2u*n_layer*ggml_tensor_overhead();
  1442. params.mem_buffer = NULL;
  1443. params.no_alloc = true;
  1444. cache.ctx = ggml_init(params);
  1445. size_t vram_kv_cache = 0;
  1446. if (!cache.ctx) {
  1447. LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
  1448. return false;
  1449. }
  1450. cache.k_l.reserve(n_layer);
  1451. cache.v_l.reserve(n_layer);
  1452. const int i_gpu_start = (int) n_layer - n_gpu_layers;
  1453. for (int i = 0; i < (int) n_layer; i++) {
  1454. ggml_tensor * k = ggml_new_tensor_1d(cache.ctx, ktype, n_embd_k_gqa*n_ctx);
  1455. ggml_tensor * v = ggml_new_tensor_1d(cache.ctx, vtype, n_embd_v_gqa*n_ctx);
  1456. ggml_format_name(k, "cache_k_l%d", i);
  1457. ggml_format_name(v, "cache_v_l%d", i);
  1458. cache.k_l.push_back(k);
  1459. cache.v_l.push_back(v);
  1460. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  1461. if (i >= i_gpu_start) {
  1462. if (offload) {
  1463. ggml_cuda_assign_buffers_no_scratch(k);
  1464. ggml_cuda_assign_buffers_no_scratch(v);
  1465. vram_kv_cache += ggml_nbytes(k);
  1466. vram_kv_cache += ggml_nbytes(v);
  1467. // HACK: mark tensor as allocated
  1468. k->data = v->data = (void *)(uintptr_t)1;
  1469. }
  1470. }
  1471. #endif // GGML_USE_CUBLAS
  1472. }
  1473. // allocate tensors
  1474. cache.buf = ggml_backend_alloc_ctx_tensors_from_buft(cache.ctx, llama_default_buffer_type(n_gpu_layers));
  1475. // buf may be NULL with full offload
  1476. if (cache.buf) {
  1477. // initialize the buffer to avoid NaNs in the padding
  1478. ggml_backend_buffer_clear(cache.buf, 0);
  1479. }
  1480. if (vram_kv_cache > 0) {
  1481. LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MB\n", __func__, vram_kv_cache / 1024.0 / 1024.0);
  1482. }
  1483. GGML_UNUSED(i_gpu_start);
  1484. GGML_UNUSED(offload);
  1485. return true;
  1486. }
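// Sizing sketch (illustrative): each layer gets one K and one V tensor of
// n_embd_{k,v}_gqa * n_ctx cells. With F16 cells, n_embd_k_gqa = n_embd_v_gqa = 1024,
// n_ctx = 4096 and n_layer = 80, that is 8 MiB + 8 MiB per layer, ~1.25 GiB in total.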
  1487. // find an empty slot of size "n_tokens" in the cache
  1488. // updates the cache head
  1489. // Note: On success, it's important that cache.head points
  1490. // to the first cell of the slot.
  1491. static bool llama_kv_cache_find_slot(
  1492. struct llama_kv_cache & cache,
  1493. const struct llama_batch & batch) {
  1494. const uint32_t n_ctx = cache.size;
  1495. const uint32_t n_tokens = batch.n_tokens;
  1496. if (n_tokens > n_ctx) {
  1497. LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
  1498. return false;
  1499. }
  1500. uint32_t n_tested = 0;
  1501. while (true) {
  1502. if (cache.head + n_tokens > n_ctx) {
  1503. n_tested += n_ctx - cache.head;
  1504. cache.head = 0;
  1505. continue;
  1506. }
  1507. bool found = true;
  1508. for (uint32_t i = 0; i < n_tokens; i++) {
  1509. if (cache.cells[cache.head + i].pos >= 0) {
  1510. found = false;
  1511. cache.head += i + 1;
  1512. n_tested += i + 1;
  1513. break;
  1514. }
  1515. }
  1516. if (found) {
  1517. break;
  1518. }
  1519. if (n_tested >= n_ctx) {
  1520. //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
  1521. return false;
  1522. }
  1523. }
  1524. for (uint32_t i = 0; i < n_tokens; i++) {
  1525. cache.cells[cache.head + i].pos = batch.pos[i];
  1526. for (int32_t j = 0; j < batch.n_seq_id[i]; j++) {
  1527. cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i][j]);
  1528. }
  1529. }
  1530. cache.used += n_tokens;
  1531. return true;
  1532. }
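// Search sketch (illustrative): with n_ctx = 8, head = 6 and a 4-token batch, the candidate
// slot [6, 10) would run past the end of the ring, so head wraps to 0 and the scan continues
// from there; the search gives up once n_tested reaches n_ctx.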
  1533. // find how many cells are currently in use
  1534. static int32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
  1535. for (uint32_t i = cache.size - 1; i > 0; --i) {
  1536. if (cache.cells[i].pos >= 0 && !cache.cells[i].seq_id.empty()) {
  1537. return i + 1;
  1538. }
  1539. }
  1540. return 0;
  1541. }
  1542. static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
  1543. for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
  1544. cache.cells[i].pos = -1;
  1545. cache.cells[i].seq_id.clear();
  1546. }
  1547. cache.head = 0;
  1548. cache.used = 0;
  1549. }
  1550. static void llama_kv_cache_seq_rm(
  1551. struct llama_kv_cache & cache,
  1552. llama_seq_id seq_id,
  1553. llama_pos p0,
  1554. llama_pos p1) {
  1555. uint32_t new_head = cache.size;
  1556. if (p0 < 0) p0 = 0;
  1557. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1558. for (uint32_t i = 0; i < cache.size; ++i) {
  1559. if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1560. if (seq_id < 0) {
  1561. cache.cells[i].seq_id.clear();
  1562. } else if (cache.cells[i].has_seq_id(seq_id)) {
  1563. cache.cells[i].seq_id.erase(seq_id);
  1564. } else {
  1565. continue;
  1566. }
  1567. if (cache.cells[i].seq_id.empty()) {
  1568. // keep count of the number of used cells
  1569. if (cache.cells[i].pos >= 0) cache.used--;
  1570. cache.cells[i].pos = -1;
  1571. if (new_head == cache.size) new_head = i;
  1572. }
  1573. }
  1574. }
  1575. // If we freed up a slot, set head to it so searching can start there.
  1576. if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
  1577. }
  1578. static void llama_kv_cache_seq_cp(
  1579. struct llama_kv_cache & cache,
  1580. llama_seq_id seq_id_src,
  1581. llama_seq_id seq_id_dst,
  1582. llama_pos p0,
  1583. llama_pos p1) {
  1584. if (p0 < 0) p0 = 0;
  1585. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1586. cache.head = 0;
  1587. for (uint32_t i = 0; i < cache.size; ++i) {
  1588. if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1589. cache.cells[i].seq_id.insert(seq_id_dst);
  1590. }
  1591. }
  1592. }
  1593. static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
  1594. uint32_t new_head = cache.size;
  1595. for (uint32_t i = 0; i < cache.size; ++i) {
  1596. if (!cache.cells[i].has_seq_id(seq_id)) {
  1597. if (cache.cells[i].pos >= 0) cache.used--;
  1598. cache.cells[i].pos = -1;
  1599. cache.cells[i].seq_id.clear();
  1600. if (new_head == cache.size) new_head = i;
  1601. } else {
  1602. cache.cells[i].seq_id.clear();
  1603. cache.cells[i].seq_id.insert(seq_id);
  1604. }
  1605. }
  1606. // If we freed up a slot, set head to it so searching can start there.
  1607. if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
  1608. }
  1609. static void llama_kv_cache_seq_shift(
  1610. struct llama_kv_cache & cache,
  1611. llama_seq_id seq_id,
  1612. llama_pos p0,
  1613. llama_pos p1,
  1614. llama_pos delta) {
  1615. uint32_t new_head = cache.size;
  1616. if (p0 < 0) p0 = 0;
  1617. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1618. for (uint32_t i = 0; i < cache.size; ++i) {
  1619. if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1620. cache.has_shift = true;
  1621. cache.cells[i].pos += delta;
  1622. cache.cells[i].delta += delta;
  1623. if (cache.cells[i].pos < 0) {
  1624. if (!cache.cells[i].seq_id.empty()) cache.used--;
  1625. cache.cells[i].pos = -1;
  1626. cache.cells[i].seq_id.clear();
  1627. if (new_head == cache.size) new_head = i;
  1628. }
  1629. }
  1630. }
  1631. // If we freed up a slot, set head to it so searching can start there.
  1632. // Otherwise we just start the next search from the beginning.
  1633. cache.head = new_head != cache.size ? new_head : 0;
  1634. }
  1635. static void llama_kv_cache_seq_div(
  1636. struct llama_kv_cache & cache,
  1637. llama_seq_id seq_id,
  1638. llama_pos p0,
  1639. llama_pos p1,
  1640. int d) {
  1641. if (p0 < 0) p0 = 0;
  1642. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  1643. for (uint32_t i = 0; i < cache.size; ++i) {
  1644. if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  1645. cache.has_shift = true;
  1646. {
  1647. llama_pos p_old = cache.cells[i].pos;
  1648. cache.cells[i].pos /= d;
  1649. cache.cells[i].delta += cache.cells[i].pos - p_old;
  1650. }
  1651. }
  1652. }
  1653. }
  1654. //
  1655. // model loading and saving
  1656. //
  1657. enum llama_fver {
  1658. GGUF_FILE_VERSION_V1 = 1,
  1659. GGUF_FILE_VERSION_V2 = 2,
  1660. GGUF_FILE_VERSION_V3 = 3,
  1661. };
  1662. static const char * llama_file_version_name(llama_fver version) {
  1663. switch (version) {
  1664. case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
  1665. case GGUF_FILE_VERSION_V2: return "GGUF V2";
  1666. case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
  1667. }
  1668. return "unknown";
  1669. }
  1670. static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
  1671. char buf[256];
  1672. snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
  1673. for (size_t i = 1; i < ne.size(); i++) {
  1674. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
  1675. }
  1676. return buf;
  1677. }
  1678. static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
  1679. char buf[256];
  1680. snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
  1681. for (int i = 1; i < GGML_MAX_DIMS; i++) {
  1682. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
  1683. }
  1684. return buf;
  1685. }
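// Output sketch (illustrative): dimensions are printed right-aligned in 5-character fields
// and comma-separated, e.g. a 4096 x 32000 tensor formats as " 4096, 32000".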
  1686. namespace GGUFMeta {
  1687. template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
  1688. struct GKV_Base_Type {
  1689. static constexpr gguf_type gt = gt_;
  1690. static T getter(const gguf_context * ctx, const int kid) {
  1691. return gfun(ctx, kid);
  1692. }
  1693. };
  1694. template<typename T> struct GKV_Base;
  1695. template<> struct GKV_Base<bool >: GKV_Base_Type<bool, GGUF_TYPE_BOOL, gguf_get_val_bool> {};
  1696. template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, GGUF_TYPE_UINT8, gguf_get_val_u8 > {};
  1697. template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, GGUF_TYPE_UINT16, gguf_get_val_u16 > {};
  1698. template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, GGUF_TYPE_UINT32, gguf_get_val_u32 > {};
  1699. template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, GGUF_TYPE_UINT64, gguf_get_val_u64 > {};
  1700. template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, GGUF_TYPE_INT8, gguf_get_val_i8 > {};
  1701. template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, GGUF_TYPE_INT16, gguf_get_val_i16 > {};
  1702. template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, GGUF_TYPE_INT32, gguf_get_val_i32 > {};
  1703. template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, GGUF_TYPE_INT64, gguf_get_val_i64 > {};
  1704. template<> struct GKV_Base<float >: GKV_Base_Type<float, GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
  1705. template<> struct GKV_Base<double >: GKV_Base_Type<double, GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
  1706. template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING, gguf_get_val_str > {};
  1707. template<> struct GKV_Base<std::string> {
  1708. static constexpr gguf_type gt = GGUF_TYPE_STRING;
  1709. static std::string getter(const gguf_context * ctx, const int kid) {
  1710. return gguf_get_val_str(ctx, kid);
  1711. }
  1712. };
  1713. struct ArrayInfo{
  1714. const gguf_type gt;
  1715. const size_t length;
  1716. const void * data;
  1717. };
  1718. template<> struct GKV_Base<ArrayInfo> {
  1719. public:
  1720. static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
  1721. static ArrayInfo getter(const gguf_context *ctx, const int k) {
  1722. return ArrayInfo {
  1723. gguf_get_arr_type(ctx, k),
  1724. size_t(gguf_get_arr_n(ctx, k)),
  1725. gguf_get_arr_data(ctx, k),
  1726. };
  1727. }
  1728. };
  1729. template<typename T>
  1730. class GKV: public GKV_Base<T> {
  1731. GKV() = delete;
  1732. public:
  1733. static T get_kv(const gguf_context * ctx, const int k) {
  1734. const enum gguf_type kt = gguf_get_kv_type(ctx, k);
  1735. if (kt != GKV::gt) {
  1736. throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
  1737. gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
  1738. }
  1739. return GKV::getter(ctx, k);
  1740. }
  1741. static const char * override_type_to_str(const llama_model_kv_override_type ty) {
  1742. switch (ty) {
  1743. case LLAMA_KV_OVERRIDE_BOOL: return "bool";
  1744. case LLAMA_KV_OVERRIDE_INT: return "int";
  1745. case LLAMA_KV_OVERRIDE_FLOAT: return "float";
  1746. }
  1747. return "unknown";
  1748. }
  1749. static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override *override) {
  1750. if (!override) { return false; }
  1751. if (override->tag == expected_type) {
  1752. LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
  1753. __func__, override_type_to_str(override->tag), override->key);
  1754. switch (override->tag) {
  1755. case LLAMA_KV_OVERRIDE_BOOL: {
  1756. printf("%s\n", override->bool_value ? "true" : "false");
  1757. } break;
  1758. case LLAMA_KV_OVERRIDE_INT: {
  1759. printf("%" PRId64 "\n", override->int_value);
  1760. } break;
  1761. case LLAMA_KV_OVERRIDE_FLOAT: {
  1762. printf("%.6f\n", override->float_value);
  1763. } break;
  1764. default:
  1765. // Shouldn't be possible to end up here, but just in case...
  1766. throw std::runtime_error(
  1767. format("Unsupported attempt to override %s type for metadata key %s\n",
  1768. override_type_to_str(override->tag), override->key));
  1769. }
  1770. return true;
  1771. }
  1772. LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
  1773. __func__, override->key, override_type_to_str(expected_type), override_type_to_str(override->tag));
  1774. return false;
  1775. }
  1776. template<typename OT>
  1777. static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
  1778. try_override(OT & target, const struct llama_model_kv_override *override) {
  1779. if (validate_override(LLAMA_KV_OVERRIDE_BOOL, override)) {
  1780. target = override->bool_value;
  1781. return true;
  1782. }
  1783. return false;
  1784. }
  1785. template<typename OT>
  1786. static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
  1787. try_override(OT & target, const struct llama_model_kv_override *override) {
  1788. if (validate_override(LLAMA_KV_OVERRIDE_INT, override)) {
  1789. target = override->int_value;
  1790. return true;
  1791. }
  1792. return false;
  1793. }
  1794. template<typename OT>
  1795. static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
  1796. try_override(T & target, const struct llama_model_kv_override *override) {
  1797. if (validate_override(LLAMA_KV_OVERRIDE_FLOAT, override)) {
  1798. target = override->float_value;
  1799. return true;
  1800. }
  1801. return false;
  1802. }
  1803. template<typename OT>
  1804. static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
1805. try_override(OT & target, const struct llama_model_kv_override *override) {
  1806. (void)target;
  1807. (void)override;
  1808. if (!override) { return false; }
  1809. // Currently, we should never end up here so it would be a bug if we do.
  1810. throw std::runtime_error(format("Unsupported attempt to override string type for metadata key %s\n",
  1811. override ? override->key : "NULL"));
  1812. }
  1813. static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override *override = nullptr) {
  1814. if (try_override<T>(target, override)) {
  1815. return true;
  1816. }
  1817. if (k < 0) { return false; }
  1818. target = get_kv(ctx, k);
  1819. return true;
  1820. }
  1821. static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override *override = nullptr) {
  1822. return set(ctx, gguf_find_key(ctx, key), target, override);
  1823. }
  1824. static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override *override = nullptr) {
  1825. return set(ctx, key.c_str(), target, override);
  1826. }
  1827. };
  1828. }
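// A minimal usage sketch of the GKV helpers above, assuming a hypothetical key name
// ("example.context_length") and no command-line override; GKV<T>::set() falls back to the file's
// value when no override matches, and returns false if the key is absent.
#if 0
static void gkv_usage_sketch(const struct gguf_context * ctx) {
    uint32_t n_ctx_train = 0;
    const bool found = GGUFMeta::GKV<uint32_t>::set(ctx, "example.context_length", n_ctx_train, nullptr);
    if (found) {
        LLAMA_LOG_INFO("%s: example.context_length = %u\n", __func__, n_ctx_train);
    }
}
#endif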
  1829. struct llama_model_loader {
  1830. int n_kv = 0;
  1831. int n_tensors = 0;
  1832. int n_created = 0;
  1833. int64_t n_elements = 0;
  1834. size_t n_bytes = 0;
  1835. bool use_mmap = false;
  1836. llama_file file;
  1837. llama_ftype ftype;
  1838. llama_fver fver;
  1839. std::unique_ptr<llama_mmap> mapping;
  1840. std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
  1841. struct gguf_context * ctx_gguf = NULL;
  1842. struct ggml_context * ctx_meta = NULL;
  1843. std::string arch_name;
  1844. LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
  1845. llama_model_loader(const std::string & fname, bool use_mmap, const struct llama_model_kv_override * param_overrides_p) : file(fname.c_str(), "rb") {
  1846. struct gguf_init_params params = {
  1847. /*.no_alloc = */ true,
  1848. /*.ctx = */ &ctx_meta,
  1849. };
  1850. if (param_overrides_p != nullptr) {
  1851. for (const struct llama_model_kv_override *p = param_overrides_p; p->key[0] != 0; p++) {
  1852. kv_overrides.insert({std::string(p->key), *p});
  1853. }
  1854. }
  1855. ctx_gguf = gguf_init_from_file(fname.c_str(), params);
  1856. if (!ctx_gguf) {
  1857. throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
  1858. }
  1859. get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
  1860. llm_kv = LLM_KV(llm_arch_from_string(arch_name));
  1861. n_kv = gguf_get_n_kv(ctx_gguf);
  1862. n_tensors = gguf_get_n_tensors(ctx_gguf);
  1863. fver = (enum llama_fver ) gguf_get_version(ctx_gguf);
  1864. for (int i = 0; i < n_tensors; i++) {
  1865. const char * name = gguf_get_tensor_name(ctx_gguf, i);
  1866. struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name);
  1867. n_elements += ggml_nelements(t);
  1868. n_bytes += ggml_nbytes(t);
  1869. }
  1870. LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
  1871. __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
1872. // determine file type based on the number of tensors for each quantization and print metadata
  1873. // TODO: make optional
  1874. {
  1875. std::map<enum ggml_type, uint32_t> n_type;
  1876. uint32_t n_type_max = 0;
  1877. enum ggml_type type_max = GGML_TYPE_F32;
  1878. for (int i = 0; i < n_tensors; i++) {
  1879. enum ggml_type type = gguf_get_tensor_type(ctx_gguf, i);
  1880. n_type[type]++;
  1881. if (n_type_max < n_type[type]) {
  1882. n_type_max = n_type[type];
  1883. type_max = type;
  1884. }
  1885. // TODO: make runtime configurable
  1886. #if 0
  1887. struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i));
  1888. LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, ggml_get_name(meta), ggml_type_name(type), llama_format_tensor_shape(meta).c_str());
  1889. #endif
  1890. }
  1891. switch (type_max) {
  1892. case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
  1893. case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
  1894. case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
  1895. case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
  1896. case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
  1897. case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
  1898. case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
  1899. case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
  1900. case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
  1901. case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
  1902. case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
  1903. case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
  1904. case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
  1905. default:
  1906. {
  1907. LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
  1908. ftype = LLAMA_FTYPE_ALL_F32;
  1909. } break;
  1910. }
  1911. // this is a way to mark that we have "guessed" the file type
  1912. ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
  1913. {
  1914. const int kid = gguf_find_key(ctx_gguf, "general.file_type");
  1915. if (kid >= 0) {
  1916. ftype = (llama_ftype) gguf_get_val_u32(ctx_gguf, kid);
  1917. }
  1918. }
  1919. LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
  1920. for (int i = 0; i < n_kv; i++) {
  1921. const char * name = gguf_get_key(ctx_gguf, i);
  1922. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  1923. const std::string type_name =
  1924. type == GGUF_TYPE_ARRAY
  1925. ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx_gguf, i)), gguf_get_arr_n(ctx_gguf, i))
  1926. : gguf_type_name(type);
  1927. std::string value = gguf_kv_to_str(ctx_gguf, i);
  1928. const size_t MAX_VALUE_LEN = 40;
  1929. if (value.size() > MAX_VALUE_LEN) {
  1930. value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
  1931. }
  1932. replace_all(value, "\n", "\\n");
  1933. LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
  1934. }
  1935. // print type counts
  1936. for (auto & kv : n_type) {
  1937. if (kv.second == 0) {
  1938. continue;
  1939. }
  1940. LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
  1941. }
  1942. }
  1943. if (!llama_mmap::SUPPORTED) {
  1944. LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
  1945. use_mmap = false;
  1946. }
  1947. this->use_mmap = use_mmap;
  1948. }
  1949. ~llama_model_loader() {
  1950. if (ctx_gguf) {
  1951. gguf_free(ctx_gguf);
  1952. }
  1953. if (ctx_meta) {
  1954. ggml_free(ctx_meta);
  1955. }
  1956. }
  1957. template<typename T>
  1958. typename std::enable_if<std::is_integral<T>::value, bool>::type
  1959. get_arr_n(const std::string & key, T & result, const bool required = true) {
  1960. const int kid = gguf_find_key(ctx_gguf, key.c_str());
  1961. if (kid < 0) {
  1962. if (required) {
  1963. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  1964. }
  1965. return false;
  1966. }
  1967. struct GGUFMeta::ArrayInfo arr_info =
  1968. GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx_gguf, kid);
  1969. result = arr_info.length;
  1970. return true;
  1971. }
  1972. template<typename T>
  1973. typename std::enable_if<std::is_integral<T>::value, bool>::type
  1974. get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
  1975. return get_arr_n(llm_kv(kid), result, required);
  1976. }
  1977. template<typename T>
  1978. bool get_key(const std::string & key, T & result, const bool required = true) {
  1979. auto it = kv_overrides.find(key);
  1980. const struct llama_model_kv_override * override =
  1981. it != kv_overrides.end() ? &it->second : nullptr;
  1982. const bool found = GGUFMeta::GKV<T>::set(ctx_gguf, key, result, override);
  1983. if (required && !found) {
  1984. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  1985. }
  1986. return found;
  1987. }
  1988. template<typename T>
  1989. bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
  1990. return get_key(llm_kv(kid), result, required);
  1991. }
  1992. std::string get_arch_name() const {
  1993. return arch_name;
  1994. }
  1995. enum llm_arch get_arch() const {
  1996. return llm_kv.arch;
  1997. }
  1998. const char * get_tensor_name(int i) const {
  1999. return gguf_get_tensor_name(ctx_gguf, i);
  2000. }
  2001. struct ggml_tensor * get_tensor_meta(const char * name) const {
  2002. return ggml_get_tensor(ctx_meta, name);
  2003. }
  2004. struct ggml_tensor * get_tensor_meta(int i) const {
  2005. return get_tensor_meta(get_tensor_name(i));
  2006. }
  2007. struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend_type backend) {
  2008. struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta);
  2009. tensor->backend = backend; // TODO: ggml_set_backend
  2010. ggml_set_name(tensor, ggml_get_name(meta));
  2011. n_created++;
  2012. return tensor;
  2013. }
  2014. struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool required = true) {
  2015. struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
  2016. if (cur == NULL) {
  2017. if (!required) {
  2018. return NULL;
  2019. }
  2020. throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
  2021. }
  2022. if (backend == GGML_BACKEND_GPU_SPLIT) {
  2023. if (ne.size() == 1) {
  2024. throw std::runtime_error(format("%s: 1-dimensional tensor '%s' cannot be split on the GPU", __func__, name.c_str()));
  2025. }
  2026. }
  2027. {
  2028. bool is_ok = true;
  2029. for (size_t i = 0; i < ne.size(); ++i) {
  2030. if (ne[i] != cur->ne[i]) {
  2031. is_ok = false;
  2032. break;
  2033. }
  2034. }
  2035. if (!is_ok) {
  2036. throw std::runtime_error(
  2037. format("%s: tensor '%s' has wrong shape; expected %s, got %s",
  2038. __func__, name.c_str(),
  2039. llama_format_tensor_shape(ne).c_str(),
  2040. llama_format_tensor_shape(cur).c_str()));
  2041. }
  2042. }
  2043. return create_tensor_for(ctx, cur, backend);
  2044. }
  2045. void done_getting_tensors() const {
  2046. if (n_created != n_tensors) {
  2047. throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
  2048. }
  2049. }
  2050. size_t file_offset(const char * name) const {
  2051. const int idx = gguf_find_tensor(ctx_gguf, name);
  2052. if (idx < 0) {
  2053. throw std::runtime_error(format("%s: tensor '%s' not found in the file", __func__, name));
  2054. }
  2055. return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx);
  2056. }
  2057. void init_mapping(bool prefetch = true) {
  2058. /*
  2059. // prefetch only CPU tensors
  2060. if (use_mmap) {
  2061. size_t size_pref = 0; // prefetch
  2062. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  2063. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  2064. if (cur->backend == GGML_BACKEND_CPU) {
  2065. size_t tensor_end = gguf_get_tensor_offset(ctx_gguf, i) + ggml_nbytes(cur);
  2066. size_pref = std::max(size_pref, tensor_end);
  2067. }
  2068. }
  2069. mapping.reset(new llama_mmap(&file, gguf_get_data_offset(ctx_gguf) + size_pref, ggml_is_numa()));
  2070. }
  2071. */
  2072. // prefetch the whole file - all the data is needed anyway
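// (a prefetch argument of -1 asks llama_mmap to advise read-ahead for up to the whole file; 0 skips the hint)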
  2073. if (use_mmap) {
  2074. mapping.reset(new llama_mmap(&file, prefetch ? -1 : 0, ggml_is_numa()));
  2075. }
  2076. }
  2077. // for backwards compatibility, does not support ggml-backend
  2078. void load_data_for(struct ggml_tensor * cur) const {
  2079. const size_t offs = file_offset(ggml_get_name(cur));
  2080. if (use_mmap && mapping) {
  2081. GGML_ASSERT(cur->data == nullptr);
  2082. cur->data = (uint8_t *)mapping->addr + offs;
  2083. } else {
  2084. GGML_ASSERT(cur->data != nullptr);
  2085. file.seek(offs, SEEK_SET);
  2086. file.read_raw(cur->data, ggml_nbytes(cur));
  2087. }
  2088. }
  2089. // Returns false if cancelled by progress_callback
  2090. bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) const {
  2091. size_t size_data = 0;
  2092. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  2093. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  2094. size_data += ggml_nbytes(cur);
  2095. }
  2096. if (use_mmap && buf_mmap) {
  2097. if (lmlock) {
  2098. lmlock->init(mapping->addr);
  2099. }
  2100. }
  2101. #if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST)
  2102. const bool legacy_offload = true;
  2103. #else
  2104. const bool legacy_offload = false;
  2105. #endif
  2106. std::vector<no_init<uint8_t>> read_buf;
  2107. size_t size_done = 0;
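// mmap_first/mmap_last track the byte range of the mapping actually used by CPU-resident tensors;
// mmap_first starts at SIZE_MAX so std::min works on the first hit, and the untouched head/tail of
// the mapping is released with unmap_fragment() once loading finishes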
  2108. size_t mmap_first = -1;
  2109. size_t mmap_last = 0;
  2110. for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
  2111. struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
  2112. GGML_ASSERT(cur); // unused tensors should have been caught by load_data already
  2113. if (progress_callback) {
  2114. if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
  2115. return false;
  2116. }
  2117. }
  2118. const size_t offs = file_offset(ggml_get_name(cur));
  2119. if (!legacy_offload || cur->backend == GGML_BACKEND_CPU) {
  2120. if (use_mmap && mapping) {
  2121. if (buf_mmap) {
  2122. ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs);
  2123. if (lmlock) {
  2124. lmlock->grow_to(offs + ggml_nbytes(cur));
  2125. }
  2126. mmap_first = std::min(mmap_first, offs);
  2127. mmap_last = std::max(mmap_last, offs + ggml_nbytes(cur));
  2128. } else {
  2129. ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur));
  2130. }
  2131. } else {
  2132. if (ggml_backend_buffer_is_host(cur->buffer)) {
  2133. file.seek(offs, SEEK_SET);
  2134. file.read_raw(cur->data, ggml_nbytes(cur));
  2135. } else {
  2136. read_buf.resize(ggml_nbytes(cur));
  2137. file.seek(offs, SEEK_SET);
  2138. file.read_raw(read_buf.data(), ggml_nbytes(cur));
  2139. ggml_backend_tensor_set(cur, read_buf.data(), 0, ggml_nbytes(cur));
  2140. }
  2141. }
  2142. } else {
  2143. // HACK: mark tensor as allocated
  2144. cur->data = (void *)(uintptr_t)1;
  2145. void * data;
  2146. if (use_mmap && mapping) {
  2147. data = (uint8_t *) mapping->addr + offs;
  2148. } else {
  2149. read_buf.resize(ggml_nbytes(cur));
  2150. file.seek(offs, SEEK_SET);
  2151. file.read_raw(read_buf.data(), ggml_nbytes(cur));
  2152. data = read_buf.data();
  2153. }
  2154. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  2155. ggml_cuda_transform_tensor(data, cur);
  2156. #elif defined(GGML_USE_CLBLAST)
  2157. GGML_ASSERT(cur->backend == GGML_BACKEND_GPU);
  2158. ggml_cl_transform_tensor(data, cur);
  2159. #else
  2160. GGML_ASSERT(!"GPU tensor without a GPU backend");
  2161. GGML_UNUSED(data);
  2162. #endif
  2163. }
  2164. size_done += ggml_nbytes(cur);
  2165. }
  2166. // unmap offloaded tensors and metadata
  2167. if (use_mmap && mapping) {
  2168. mapping->unmap_fragment(0, mmap_first);
  2169. mapping->unmap_fragment(mmap_last, mapping->size);
  2170. }
  2171. if (progress_callback) {
  2172. // Even though the model is done loading, we still honor
  2173. // cancellation since we need to free allocations.
  2174. return progress_callback(1.0f, progress_callback_user_data);
  2175. }
  2176. return true;
  2177. }
  2178. };
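// A brief sketch of how llama_model_loader is typically driven by the llm_load_* functions below,
// assuming a placeholder file name: it opens the GGUF file, reads one optional key and walks the
// tensor metadata without loading any tensor data (the gguf context is created with no_alloc=true).
#if 0
static void model_loader_sketch(const std::string & fname) {
    llama_model_loader ml(fname, /*use_mmap =*/ true, /*param_overrides_p =*/ nullptr);
    std::string general_name;
    ml.get_key(LLM_KV_GENERAL_NAME, general_name, /*required =*/ false);
    for (int i = 0; i < ml.n_tensors; ++i) {
        struct ggml_tensor * meta = ml.get_tensor_meta(i);
        LLAMA_LOG_INFO("%s: %-32s %s\n", __func__, ggml_get_name(meta), llama_format_tensor_shape(meta).c_str());
    }
}
#endif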
  2179. //
  2180. // load LLaMA models
  2181. //
  2182. static std::string llama_model_arch_name(llm_arch arch) {
  2183. auto it = LLM_ARCH_NAMES.find(arch);
  2184. if (it == LLM_ARCH_NAMES.end()) {
  2185. return "unknown";
  2186. }
  2187. return it->second;
  2188. }
  2189. static std::string llama_model_ftype_name(llama_ftype ftype) {
  2190. if (ftype & LLAMA_FTYPE_GUESSED) {
  2191. return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
  2192. }
  2193. switch (ftype) {
  2194. case LLAMA_FTYPE_ALL_F32: return "all F32";
  2195. case LLAMA_FTYPE_MOSTLY_F16: return "F16";
  2196. case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
  2197. case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
  2198. case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
  2199. return "Q4_1, some F16";
  2200. case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
  2201. case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
  2202. case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
  2203. // K-quants
  2204. case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K";
  2205. case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
  2206. case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
  2207. case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
  2208. case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
  2209. case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
  2210. case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
  2211. case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
  2212. case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
2213. case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
  2214. default: return "unknown, may not work";
  2215. }
  2216. }
  2217. static const char * llama_model_type_name(e_model type) {
  2218. switch (type) {
  2219. case MODEL_1B: return "1B";
  2220. case MODEL_3B: return "3B";
  2221. case MODEL_7B: return "7B";
  2222. case MODEL_8B: return "8B";
  2223. case MODEL_13B: return "13B";
  2224. case MODEL_15B: return "15B";
  2225. case MODEL_30B: return "30B";
  2226. case MODEL_34B: return "34B";
  2227. case MODEL_40B: return "40B";
  2228. case MODEL_65B: return "65B";
  2229. case MODEL_70B: return "70B";
  2230. case MODEL_SMALL: return "0.1B";
  2231. case MODEL_MEDIUM: return "0.4B";
  2232. case MODEL_LARGE: return "0.8B";
  2233. case MODEL_XL: return "1.5B";
  2234. default: return "?B";
  2235. }
  2236. }
  2237. static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
  2238. model.arch = ml.get_arch();
  2239. if (model.arch == LLM_ARCH_UNKNOWN) {
  2240. throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
  2241. }
  2242. }
  2243. static void llm_load_hparams(
  2244. llama_model_loader & ml,
  2245. llama_model & model) {
  2246. auto & hparams = model.hparams;
  2247. const gguf_context * ctx = ml.ctx_gguf;
  2248. // get metadata as string
  2249. for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
  2250. enum gguf_type type = gguf_get_kv_type(ctx, i);
  2251. if (type == GGUF_TYPE_ARRAY) {
  2252. continue;
  2253. }
  2254. const char * name = gguf_get_key(ctx, i);
  2255. const std::string value = gguf_kv_to_str(ctx, i);
  2256. model.gguf_kv.emplace(name, value);
  2257. }
  2258. // get general kv
  2259. ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
  2260. // get hparams kv
  2261. ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
  2262. ml.get_key (LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
  2263. ml.get_key (LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
  2264. ml.get_key (LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff);
  2265. ml.get_key (LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head);
  2266. ml.get_key (LLM_KV_BLOCK_COUNT, hparams.n_layer);
  2267. ml.get_key (LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
  2268. ml.get_key (LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
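// a model either has no experts (dense) or has both expert counts set (MoE); the asserts below enforce this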
  2269. GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
  2270. GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
  2271. if (hparams.n_expert > 0) {
  2272. GGML_ASSERT(hparams.n_expert_used > 0);
  2273. } else {
  2274. GGML_ASSERT(hparams.n_expert_used == 0);
  2275. }
  2276. // n_head_kv is optional, default to n_head
  2277. hparams.n_head_kv = hparams.n_head;
  2278. ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv, false);
  2279. bool rope_finetuned = false;
  2280. ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
  2281. hparams.rope_finetuned = rope_finetuned;
  2282. hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
  2283. ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_yarn_orig_ctx, false);
  2284. // rope_freq_base (optional)
  2285. hparams.rope_freq_base_train = 10000.0f;
  2286. ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
  2287. std::string rope_scaling("linear");
  2288. ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
  2289. hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
  2290. GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_UNSPECIFIED);
  2291. // rope_freq_scale (inverse of the kv) is optional
  2292. float ropescale = 0.0f;
  2293. if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
  2294. // try the old key name
  2295. ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
  2296. }
  2297. hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
  2298. // sanity check for n_rot (optional)
  2299. {
  2300. hparams.n_rot = hparams.n_embd / hparams.n_head;
  2301. ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
  2302. if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
  2303. if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
  2304. throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
  2305. }
  2306. }
  2307. // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
  2308. // gpt-j n_rot = rotary_dim
  2309. }
  2310. hparams.n_embd_head_k = hparams.n_embd / hparams.n_head;
  2311. ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
  2312. hparams.n_embd_head_v = hparams.n_embd / hparams.n_head;
  2313. ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
  2314. // arch-specific KVs
  2315. switch (model.arch) {
  2316. case LLM_ARCH_LLAMA:
  2317. {
  2318. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2319. switch (hparams.n_layer) {
  2320. case 22: model.type = e_model::MODEL_1B; break;
  2321. case 26: model.type = e_model::MODEL_3B; break;
  2322. case 32: model.type = e_model::MODEL_7B; break;
  2323. case 40: model.type = e_model::MODEL_13B; break;
  2324. case 48: model.type = e_model::MODEL_34B; break;
  2325. case 60: model.type = e_model::MODEL_30B; break;
  2326. case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
  2327. default: model.type = e_model::MODEL_UNKNOWN;
  2328. }
  2329. } break;
  2330. case LLM_ARCH_FALCON:
  2331. {
  2332. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2333. switch (hparams.n_layer) {
  2334. case 32: model.type = e_model::MODEL_7B; break;
  2335. case 60: model.type = e_model::MODEL_40B; break;
  2336. default: model.type = e_model::MODEL_UNKNOWN;
  2337. }
  2338. } break;
  2339. case LLM_ARCH_BAICHUAN:
  2340. {
  2341. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2342. switch (hparams.n_layer) {
  2343. case 32: model.type = e_model::MODEL_7B; break;
  2344. case 40: model.type = e_model::MODEL_13B; break;
  2345. default: model.type = e_model::MODEL_UNKNOWN;
  2346. }
  2347. } break;
  2348. case LLM_ARCH_STARCODER:
  2349. {
  2350. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2351. switch (hparams.n_layer) {
  2352. case 24: model.type = e_model::MODEL_1B; break;
  2353. case 36: model.type = e_model::MODEL_3B; break;
  2354. case 42: model.type = e_model::MODEL_7B; break;
  2355. case 40: model.type = e_model::MODEL_15B; break;
  2356. default: model.type = e_model::MODEL_UNKNOWN;
  2357. }
  2358. } break;
  2359. case LLM_ARCH_PERSIMMON:
  2360. {
  2361. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2362. switch (hparams.n_layer) {
  2363. case 36: model.type = e_model::MODEL_8B; break;
  2364. default: model.type = e_model::MODEL_UNKNOWN;
  2365. }
  2366. } break;
  2367. case LLM_ARCH_REFACT:
  2368. {
  2369. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2370. switch (hparams.n_layer) {
  2371. case 32: model.type = e_model::MODEL_1B; break;
  2372. default: model.type = e_model::MODEL_UNKNOWN;
  2373. }
  2374. } break;
  2375. case LLM_ARCH_BLOOM:
  2376. {
  2377. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2378. switch (hparams.n_layer) {
  2379. case 24: model.type = e_model::MODEL_1B; break;
  2380. case 30:
  2381. switch (hparams.n_embd) {
  2382. case 2560: model.type = e_model::MODEL_3B; break;
  2383. case 4096: model.type = e_model::MODEL_7B; break;
  2384. } break;
  2385. }
  2386. } break;
  2387. case LLM_ARCH_MPT:
  2388. {
  2389. hparams.f_clamp_kqv = 0.0f;
  2390. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2391. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  2392. ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
  2393. switch (hparams.n_layer) {
  2394. case 32: model.type = e_model::MODEL_7B; break;
  2395. case 48: model.type = e_model::MODEL_30B; break;
  2396. default: model.type = e_model::MODEL_UNKNOWN;
  2397. }
  2398. } break;
  2399. case LLM_ARCH_STABLELM:
  2400. {
  2401. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2402. switch (hparams.n_layer) {
  2403. case 32: model.type = e_model::MODEL_3B; break;
  2404. default: model.type = e_model::MODEL_UNKNOWN;
  2405. }
  2406. } break;
  2407. case LLM_ARCH_QWEN:
  2408. {
  2409. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2410. switch (hparams.n_layer) {
  2411. case 32: model.type = e_model::MODEL_7B; break;
  2412. case 40: model.type = e_model::MODEL_13B; break;
  2413. default: model.type = e_model::MODEL_UNKNOWN;
  2414. }
  2415. } break;
  2416. case LLM_ARCH_PHI2:
  2417. {
  2418. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2419. switch (hparams.n_layer) {
  2420. case 24: model.type = e_model::MODEL_1B; break;
  2421. case 32: model.type = e_model::MODEL_3B; break;
  2422. default: model.type = e_model::MODEL_UNKNOWN;
  2423. }
  2424. } break;
  2425. case LLM_ARCH_PLAMO:
  2426. {
  2427. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  2428. switch (hparams.n_layer) {
  2429. case 40: model.type = e_model::MODEL_13B; break;
  2430. default: model.type = e_model::MODEL_UNKNOWN;
  2431. }
  2432. } break;
  2433. case LLM_ARCH_GPT2:
  2434. {
  2435. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  2436. switch (hparams.n_layer) {
  2437. case 12: model.type = e_model::MODEL_SMALL; break;
  2438. case 24: model.type = e_model::MODEL_MEDIUM; break;
  2439. case 36: model.type = e_model::MODEL_LARGE; break;
  2440. case 48: model.type = e_model::MODEL_XL; break;
  2441. default: model.type = e_model::MODEL_UNKNOWN;
  2442. }
  2443. } break;
  2444. default: (void)0;
  2445. }
  2446. model.ftype = ml.ftype;
  2447. }
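// A worked example of the derived hyperparameters computed above, using illustrative values for a
// hypothetical LLaMA-style checkpoint (these numbers do not come from any particular model file).
#if 0
static void hparams_derivation_sketch() {
    const uint32_t n_embd    = 4096;
    const uint32_t n_head    = 32;
    const uint32_t n_head_kv = 8;     // grouped-query attention: 4 query heads share each KV head
    const float    ropescale = 4.0f;  // value stored under the rope scaling factor KV

    const uint32_t n_rot            = n_embd / n_head;     // 128, the default when the KV is absent
    const uint32_t n_gqa            = n_head / n_head_kv;  // 4
    const float    freq_scale_train = 1.0f / ropescale;    // the KV stores the inverse of freq_scale

    GGML_UNUSED(n_rot); GGML_UNUSED(n_gqa); GGML_UNUSED(freq_scale_train);
}
#endif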
  2448. // TODO: This should probably be in llama.h
  2449. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special = false);
  2450. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);
  2451. static void llm_load_vocab(
  2452. llama_model_loader & ml,
  2453. llama_model & model) {
  2454. auto & vocab = model.vocab;
  2455. struct gguf_context * ctx = ml.ctx_gguf;
  2456. const auto kv = LLM_KV(model.arch);
  2457. const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
  2458. if (token_idx == -1) {
  2459. throw std::runtime_error("cannot find tokenizer vocab in model file\n");
  2460. }
  2461. const float * scores = nullptr;
  2462. const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
  2463. if (score_idx != -1) {
  2464. scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
  2465. }
  2466. const int * toktypes = nullptr;
  2467. const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
  2468. if (toktype_idx != -1) {
  2469. toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
  2470. }
  2471. // determine vocab type
  2472. {
  2473. std::string tokenizer_name;
  2474. ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_name);
  2475. if (tokenizer_name == "llama") {
  2476. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  2477. // default special tokens
  2478. vocab.special_bos_id = 1;
  2479. vocab.special_eos_id = 2;
  2480. vocab.special_unk_id = 0;
  2481. vocab.special_sep_id = -1;
  2482. vocab.special_pad_id = -1;
  2483. } else if (tokenizer_name == "gpt2") {
  2484. vocab.type = LLAMA_VOCAB_TYPE_BPE;
  2485. // read bpe merges and populate bpe ranks
  2486. const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
  2487. if (merges_keyidx == -1) {
  2488. throw std::runtime_error("cannot find tokenizer merges in model file\n");
  2489. }
  2490. const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
  2491. for (int i = 0; i < n_merges; i++) {
  2492. const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
  2493. GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
  2494. std::string first;
  2495. std::string second;
  2496. const size_t pos = word.find(' ', 1);
  2497. if (pos != std::string::npos) {
  2498. first = word.substr(0, pos);
  2499. second = word.substr(pos + 1);
  2500. }
  2501. vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
  2502. }
  2503. // default special tokens
  2504. vocab.special_bos_id = 11;
  2505. vocab.special_eos_id = 11;
  2506. vocab.special_unk_id = -1;
  2507. vocab.special_sep_id = -1;
  2508. vocab.special_pad_id = -1;
  2509. } else {
  2510. LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
  2511. LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
  2512. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  2513. }
  2514. }
  2515. const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
  2516. vocab.id_to_token.resize(n_vocab);
  2517. for (uint32_t i = 0; i < n_vocab; i++) {
  2518. std::string word = gguf_get_arr_str(ctx, token_idx, i);
  2519. GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
  2520. vocab.token_to_id[word] = i;
  2521. auto & token_data = vocab.id_to_token[i];
  2522. token_data.text = std::move(word);
  2523. token_data.score = scores ? scores[i] : 0.0f;
  2524. token_data.type = toktypes ? (llama_token_type) toktypes[i] : LLAMA_TOKEN_TYPE_NORMAL;
  2525. }
  2526. GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
  2527. // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
  2528. if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
  2529. vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
  2530. } else {
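// "\u010A" (U+010A) is the byte-level BPE encoding of the newline byte 0x0A under the GPT-2 byte-to-unicode mapping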
  2531. const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
  2532. GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
  2533. vocab.linefeed_id = ids[0];
  2534. }
  2535. // special tokens
  2536. {
  2537. const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
  2538. { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
  2539. { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
  2540. { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
  2541. { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
  2542. { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
  2543. };
  2544. for (const auto & it : special_token_types) {
  2545. const std::string & key = kv(std::get<0>(it));
  2546. int32_t & id = std::get<1>(it);
  2547. uint32_t new_id;
  2548. if (!ml.get_key(std::get<0>(it), new_id, false)) {
  2549. continue;
  2550. }
  2551. if (new_id >= vocab.id_to_token.size()) {
2552. LLAMA_LOG_WARN("%s: bad special token: '%s' = %u, using default id %d\n",
  2553. __func__, key.c_str(), new_id, id);
  2554. } else {
  2555. id = new_id;
  2556. }
  2557. }
  2558. // Handle add_bos_token and add_eos_token
  2559. {
  2560. bool temp = true;
  2561. if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
  2562. vocab.special_add_bos = int(temp);
  2563. }
  2564. if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
  2565. vocab.special_add_eos = int(temp);
  2566. }
  2567. }
  2568. }
  2569. // build special tokens cache
  2570. {
2571. // TODO: It is unclear (to me) at this point whether special tokens are guaranteed to be of a deterministic type,
2572. // and whether they will always be correctly labeled in 'added_tokens.json' etc.
2573. // The assumption is: since special tokens aren't meant to be exposed to the end user, they are designed
2574. // to be unmatchable by the tokenizer; therefore any vocab token that the tokenizer cannot match
2575. // is a special token.
2576. // From testing, this appears to correlate 1:1 with special tokens.
2577. //
2578. // Counting special tokens and verifying in only one direction
2579. // is sufficient to detect a difference between those two sets.
2580. //
  2581. uint32_t special_tokens_count_by_type = 0;
  2582. uint32_t special_tokens_count_from_verification = 0;
  2583. bool special_tokens_definition_mismatch = false;
  2584. for (const auto & t : vocab.token_to_id) {
  2585. const auto & token = t.first;
  2586. const auto & id = t.second;
  2587. // Count all non-normal tokens in the vocab while iterating
  2588. if (vocab.id_to_token[id].type != LLAMA_TOKEN_TYPE_NORMAL) {
  2589. special_tokens_count_by_type++;
  2590. }
  2591. // Skip single character tokens
  2592. if (token.length() > 1) {
  2593. bool is_tokenizable = false;
  2594. // Split token string representation in two, in all possible ways
  2595. // and check if both halves can be matched to a valid token
  2596. for (unsigned i = 1; i < token.length();) {
  2597. const auto left = token.substr(0, i);
  2598. const auto right = token.substr(i);
2599. // check that we didn't partition in the middle of a utf-8 sequence
  2600. auto utf = utf8_len(left.at(left.length() - 1));
  2601. if (utf == 1) {
  2602. if (vocab.token_to_id.find(left) != vocab.token_to_id.end() &&
  2603. vocab.token_to_id.find(right) != vocab.token_to_id.end() ) {
  2604. is_tokenizable = true;
  2605. break;
  2606. }
  2607. i++;
  2608. } else {
  2609. // skip over the rest of multibyte utf sequence
  2610. i += utf - 1;
  2611. }
  2612. }
  2613. if (!is_tokenizable) {
2614. // Some tokens are multibyte but are utf-8 sequences with an equivalent text length of 1;
2615. // it's faster to re-filter them here, since there are far fewer candidates now
2616. // Calculate the total "utf" length of a token's string representation
  2617. size_t utf8_str_len = 0;
  2618. for (unsigned i = 0; i < token.length();) {
  2619. utf8_str_len++;
  2620. i += utf8_len(token.at(i));
  2621. }
  2622. // And skip the ones which are one character
  2623. if (utf8_str_len > 1) {
  2624. // At this point what we have left are special tokens only
  2625. vocab.special_tokens_cache[token] = id;
  2626. // Count manually found special tokens
  2627. special_tokens_count_from_verification++;
  2628. // If this manually found special token is not marked as such, flag a mismatch
  2629. if (vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL) {
  2630. special_tokens_definition_mismatch = true;
  2631. }
  2632. }
  2633. }
  2634. }
  2635. }
  2636. if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
  2637. LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
  2638. __func__,
  2639. special_tokens_count_from_verification, vocab.id_to_token.size(),
  2640. special_tokens_count_by_type, vocab.id_to_token.size()
  2641. );
  2642. } else {
  2643. LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
  2644. __func__,
  2645. special_tokens_count_from_verification, vocab.id_to_token.size()
  2646. );
  2647. }
  2648. }
  2649. }
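// A small sketch of the merge-entry format parsed above for BPE ("gpt2") vocabularies, assuming a
// hypothetical merge string: each array element is "<first> <second>", split on the first space
// after position 0, and its array index becomes the merge rank stored in vocab.bpe_ranks.
#if 0
static void bpe_merge_parse_sketch() {
    const std::string word = "\u0120 t"; // hypothetical entry: "Ġ" followed by "t"
    const size_t pos = word.find(' ', 1);
    const std::string first  = word.substr(0, pos);  // "\u0120"
    const std::string second = word.substr(pos + 1); // "t"
    GGML_UNUSED(first); GGML_UNUSED(second);
}
#endif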
  2650. static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
  2651. const auto & hparams = model.hparams;
  2652. const auto & vocab = model.vocab;
  2653. const auto rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
  2654. // hparams
  2655. LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
  2656. LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch).c_str());
  2657. LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix
  2658. LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
  2659. LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
  2660. LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
  2661. LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
  2662. LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
  2663. LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
  2664. LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
  2665. LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot);
  2666. LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k);
  2667. LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v);
  2668. LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
  2669. LLAMA_LOG_INFO("%s: n_embd_k_gqa = %u\n", __func__, hparams.n_embd_k_gqa());
  2670. LLAMA_LOG_INFO("%s: n_embd_v_gqa = %u\n", __func__, hparams.n_embd_v_gqa());
  2671. LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
  2672. LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
  2673. LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
  2674. LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
  2675. LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
  2676. LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
  2677. LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
  2678. LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type.c_str());
  2679. LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
  2680. LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
  2681. LLAMA_LOG_INFO("%s: n_yarn_orig_ctx = %u\n", __func__, hparams.n_yarn_orig_ctx);
  2682. LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
  2683. LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
  2684. LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
  2685. if (ml.n_elements >= 1e12) {
  2686. LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, ml.n_elements*1e-12);
  2687. } else if (ml.n_elements >= 1e9) {
  2688. LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
  2689. } else if (ml.n_elements >= 1e6) {
  2690. LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, ml.n_elements*1e-6);
  2691. } else {
  2692. LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
  2693. }
  2694. if (ml.n_bytes < GiB) {
  2695. LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  2696. } else {
  2697. LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  2698. }
  2699. // general kv
  2700. LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
  2701. // special tokens
  2702. if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
  2703. if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
  2704. if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
  2705. if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
  2706. if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
  2707. if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
  2708. }
  2709. // Returns false if cancelled by progress_callback
  2710. static bool llm_load_tensors(
  2711. llama_model_loader & ml,
  2712. llama_model & model,
  2713. int n_gpu_layers,
  2714. int main_gpu,
  2715. const float * tensor_split,
  2716. bool use_mlock,
  2717. llama_progress_callback progress_callback,
  2718. void * progress_callback_user_data) {
  2719. model.t_start_us = ggml_time_us();
  2720. auto & ctx = model.ctx;
  2721. auto & hparams = model.hparams;
  2722. model.n_gpu_layers = n_gpu_layers;
  2723. size_t ctx_size = ggml_tensor_overhead() * ml.n_tensors;
  2724. LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, ctx_size/1024.0/1024.0);
  2725. // create the ggml context
  2726. {
  2727. struct ggml_init_params params = {
  2728. /*.mem_size =*/ ctx_size,
  2729. /*.mem_buffer =*/ NULL,
  2730. /*.no_alloc =*/ true,
  2731. };
  2732. model.ctx = ggml_init(params);
  2733. if (!model.ctx) {
  2734. throw std::runtime_error(format("ggml_init() failed"));
  2735. }
  2736. }
  2737. (void) main_gpu;
  2738. enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU;
  2739. enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU;
  2740. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  2741. if (ggml_cublas_loaded()) {
  2742. LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__);
  2743. ggml_cuda_set_main_device(main_gpu);
  2744. llama_backend_offload = GGML_BACKEND_GPU;
  2745. llama_backend_offload_split = GGML_BACKEND_GPU_SPLIT;
  2746. }
  2747. #elif defined(GGML_USE_CLBLAST)
  2748. LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
  2749. llama_backend_offload = GGML_BACKEND_GPU;
  2750. llama_backend_offload_split = GGML_BACKEND_GPU;
  2751. #endif
  2752. // create tensors for the weights
  2753. {
  2754. const int64_t n_embd = hparams.n_embd;
  2755. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  2756. const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  2757. const int64_t n_layer = hparams.n_layer;
  2758. const int64_t n_vocab = hparams.n_vocab;
  2759. const auto tn = LLM_TN(model.arch);
  2760. switch (model.arch) {
  2761. case LLM_ARCH_LLAMA:
  2762. case LLM_ARCH_REFACT:
  2763. {
  2764. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2765. // output
  2766. {
  2767. ggml_backend_type backend_norm;
  2768. ggml_backend_type backend_output;
  2769. if (n_gpu_layers > int(n_layer)) {
  2770. backend_norm = llama_backend_offload;
  2771. backend_output = llama_backend_offload_split;
  2772. } else {
  2773. backend_norm = GGML_BACKEND_CPU;
  2774. backend_output = GGML_BACKEND_CPU;
  2775. }
  2776. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2777. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2778. }
  2779. const uint32_t n_ff = hparams.n_ff;
  2780. const int64_t n_embd_gqa = n_embd_v_gqa;
  2781. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  2782. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  2783. const int i_gpu_start = n_layer - n_gpu_layers;
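// layers with index >= i_gpu_start are created on the offload backend; when n_gpu_layers > n_layer
// this is negative, so every layer (and, per the branch above, the output tensors) is offloaded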
  2784. model.layers.resize(n_layer);
  2785. for (uint32_t i = 0; i < n_layer; ++i) {
  2786. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2787. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2788. auto & layer = model.layers[i];
  2789. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2790. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  2791. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2792. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2793. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2794. // optional bias tensors
  2795. layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend, false);
  2796. layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend, false);
  2797. layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend, false);
  2798. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend, false);
  2799. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2800. layer.ffn_gate_inp = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd}, backend, false);
  2801. if (layer.ffn_gate_inp == nullptr) {
  2802. GGML_ASSERT(hparams.n_expert == 0);
  2803. GGML_ASSERT(hparams.n_expert_used == 0);
  2804. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  2805. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2806. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2807. } else {
  2808. GGML_ASSERT(hparams.n_expert > 0);
  2809. GGML_ASSERT(hparams.n_expert_used > 0);
  2810. // MoE branch
  2811. for (uint32_t x = 0; x < hparams.n_expert; ++x) {
  2812. layer.ffn_gate_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), {n_embd, n_ff}, backend_split);
  2813. layer.ffn_down_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd}, backend_split);
  2814. layer.ffn_up_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), {n_embd, n_ff}, backend_split);
  2815. }
  2816. }
  2817. }
  2818. } break;
  2819. case LLM_ARCH_BAICHUAN:
  2820. {
  2821. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2822. {
  2823. ggml_backend_type backend_norm;
  2824. ggml_backend_type backend_output;
  2825. if (n_gpu_layers > int(n_layer)) {
  2826. backend_norm = llama_backend_offload;
  2827. backend_output = llama_backend_offload_split;
  2828. } else {
  2829. backend_norm = GGML_BACKEND_CPU;
  2830. backend_output = GGML_BACKEND_CPU;
  2831. }
  2832. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2833. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2834. }
  2835. const uint32_t n_ff = hparams.n_ff;
  2836. const int64_t n_embd_gqa = n_embd_v_gqa;
  2837. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  2838. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  2839. const int i_gpu_start = n_layer - n_gpu_layers;
  2840. model.layers.resize(n_layer);
  2841. for (uint32_t i = 0; i < n_layer; ++i) {
  2842. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2843. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2844. auto & layer = model.layers[i];
  2845. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2846. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  2847. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2848. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  2849. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2850. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2851. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  2852. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2853. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2854. }
  2855. } break;
  2856. case LLM_ARCH_FALCON:
  2857. {
  2858. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2859. // output
  2860. {
  2861. ggml_backend_type backend_norm;
  2862. ggml_backend_type backend_output;
  2863. if (n_gpu_layers > int(n_layer)) {
  2864. backend_norm = llama_backend_offload;
  2865. backend_output = llama_backend_offload_split;
  2866. } else {
  2867. backend_norm = GGML_BACKEND_CPU;
  2868. backend_output = GGML_BACKEND_CPU;
  2869. }
  2870. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2871. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2872. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2873. }
  2874. const uint32_t n_ff = hparams.n_ff;
  2875. const int64_t n_embd_gqa = n_embd_v_gqa;
  2876. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  2877. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  2878. const int i_gpu_start = n_layer - n_gpu_layers;
  2879. model.layers.resize(n_layer);
  2880. for (uint32_t i = 0; i < n_layer; ++i) {
  2881. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2882. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2883. auto & layer = model.layers[i];
  2884. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2885. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2886. if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) {
  2887. layer.attn_norm_2 = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, backend);
  2888. layer.attn_norm_2_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, backend);
  2889. }
  2890. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2891. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2892. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  2893. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2894. }
  2895. } break;
  2896. case LLM_ARCH_STARCODER:
  2897. {
  2898. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2899. model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU);
  2900. // output
  2901. {
  2902. ggml_backend_type backend_norm;
  2903. ggml_backend_type backend_output;
  2904. if (n_gpu_layers > int(n_layer)) {
  2905. backend_norm = llama_backend_offload;
  2906. backend_output = llama_backend_offload_split;
  2907. } else {
  2908. backend_norm = GGML_BACKEND_CPU;
  2909. backend_output = GGML_BACKEND_CPU;
  2910. }
  2911. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2912. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2913. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2914. }
  2915. const uint32_t n_ff = hparams.n_ff;
  2916. const int64_t n_embd_gqa = n_embd_v_gqa;
  2917. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  2918. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  2919. const int i_gpu_start = n_layer - n_gpu_layers;
  2920. model.layers.resize(n_layer);
  2921. for (uint32_t i = 0; i < n_layer; ++i) {
  2922. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  2923. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  2924. auto & layer = model.layers[i];
  2925. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2926. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2927. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2928. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  2929. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2930. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  2931. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2932. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2933. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  2934. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  2935. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2936. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  2937. }
  2938. } break;
  2939. case LLM_ARCH_PERSIMMON:
  2940. {
  2941. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2942. {
  2943. ggml_backend_type backend_norm;
  2944. ggml_backend_type backend_output;
  2945. if (n_gpu_layers > int(n_layer)) {
  2946. backend_norm = llama_backend_offload;
  2947. backend_output = llama_backend_offload_split;
  2948. } else {
  2949. backend_norm = GGML_BACKEND_CPU;
  2950. backend_output = GGML_BACKEND_CPU;
  2951. }
  2952. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  2953. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  2954. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  2955. }
  2956. const uint32_t n_ff = hparams.n_ff;
  2957. const int64_t n_embd_gqa = n_embd_v_gqa;
  2958. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  2959. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  2960. const int i_gpu_start = n_layer - n_gpu_layers;
  2961. model.layers.resize(n_layer);
  2962. for (uint32_t i = 0; i < n_layer; ++i) {
  2963. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload;
  2964. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split;
  2965. auto & layer = model.layers[i];
  2966. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  2967. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  2968. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  2969. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  2970. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  2971. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  2972. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  2973. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  2974. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  2975. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  2976. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  2977. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  2978. layer.attn_q_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}, backend);
  2979. layer.attn_q_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}, backend);
  2980. layer.attn_k_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64}, backend);
  2981. layer.attn_k_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}, backend);
  2982. }
  2983. } break;
  2984. case LLM_ARCH_BLOOM:
  2985. {
  2986. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  2987. model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU);
  2988. model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU);
  2989. // output
  2990. {
  2991. ggml_backend_type backend_norm;
  2992. ggml_backend_type backend_output;
  2993. if (n_gpu_layers > int(n_layer)) {
  2994. backend_norm = llama_backend_offload;
  2995. backend_output = llama_backend_offload_split;
  2996. } else {
  2997. backend_norm = GGML_BACKEND_CPU;
  2998. backend_output = GGML_BACKEND_CPU;
  2999. }
  3000. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3001. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  3002. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3003. }
  3004. const uint32_t n_ff = hparams.n_ff;
  3005. const int64_t n_embd_gqa = n_embd_v_gqa;
  3006. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  3007. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  3008. const int i_gpu_start = n_layer - n_gpu_layers;
  3009. model.layers.resize(n_layer);
  3010. for (uint32_t i = 0; i < n_layer; ++i) {
  3011. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3012. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3013. auto & layer = model.layers[i];
  3014. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3015. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  3016. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  3017. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  3018. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3019. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  3020. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  3021. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  3022. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  3023. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  3024. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3025. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  3026. }
  3027. } break;
  3028. case LLM_ARCH_MPT:
  3029. {
  3030. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3031. // output
  3032. {
  3033. ggml_backend_type backend_norm;
  3034. ggml_backend_type backend_output;
  3035. if (n_gpu_layers > int(n_layer)) {
  3036. backend_norm = llama_backend_offload;
  3037. backend_output = llama_backend_offload_split;
  3038. } else {
  3039. backend_norm = GGML_BACKEND_CPU;
  3040. backend_output = GGML_BACKEND_CPU;
  3041. }
  3042. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3043. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3044. }
  3045. const uint32_t n_ff = hparams.n_ff;
  3046. const int64_t n_embd_gqa = n_embd_v_gqa;
  3047. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  3048. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  3049. const int i_gpu_start = n_layer - n_gpu_layers;
  3050. model.layers.resize(n_layer);
  3051. for (uint32_t i = 0; i < n_layer; ++i) {
  3052. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3053. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3054. auto & layer = model.layers[i];
  3055. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3056. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  3057. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3058. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  3059. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  3060. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3061. // AWQ ScaleActivation layer
  3062. layer.ffn_act = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, backend, false);
  3063. }
  3064. } break;
  3065. case LLM_ARCH_STABLELM:
  3066. {
  3067. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3068. // output
  3069. {
  3070. ggml_backend_type backend_norm;
  3071. ggml_backend_type backend_output;
  3072. if (n_gpu_layers > int(n_layer)) {
  3073. backend_norm = llama_backend_offload;
  3074. backend_output = llama_backend_offload_split;
  3075. } else {
  3076. backend_norm = GGML_BACKEND_CPU;
  3077. backend_output = GGML_BACKEND_CPU;
  3078. }
  3079. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  3080. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3081. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3082. }
  3083. const uint32_t n_ff = hparams.n_ff;
  3084. const int64_t n_embd_gqa = n_embd_v_gqa;
  3085. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  3086. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  3087. const int i_gpu_start = n_layer - n_gpu_layers;
  3088. model.layers.resize(n_layer);
  3089. for (uint32_t i = 0; i < n_layer; ++i) {
  3090. /*
  3091. llama_model_loader: - tensor 4: blk.0.attn_output.weight f16 [ 2560, 2560, 1, 1 ]
  3092. */
  3093. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3094. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3095. auto & layer = model.layers[i];
  3096. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3097. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  3098. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  3099. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  3100. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  3101. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3102. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  3103. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  3104. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  3105. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  3106. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3107. }
  3108. } break;
  3109. case LLM_ARCH_QWEN:
  3110. {
  3111. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3112. {
  3113. ggml_backend_type backend_norm;
  3114. ggml_backend_type backend_output;
  3115. if (n_gpu_layers > int(n_layer)) {
  3116. backend_norm = llama_backend_offload;
  3117. backend_output = llama_backend_offload_split;
  3118. } else {
  3119. backend_norm = GGML_BACKEND_CPU;
  3120. backend_output = GGML_BACKEND_CPU;
  3121. }
  3122. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3123. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3124. }
  3125. const uint32_t n_ff = hparams.n_ff / 2;
  3126. const int i_gpu_start = n_layer - n_gpu_layers;
  3127. model.layers.resize(n_layer);
  3128. for (uint32_t i = 0; i < n_layer; ++i) {
  3129. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3130. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3131. auto & layer = model.layers[i];
  3132. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3133. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd * 3}, backend_split);
  3134. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd * 3}, backend);
  3135. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3136. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  3137. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  3138. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  3139. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3140. }
  3141. } break;
  3142. case LLM_ARCH_PHI2:
  3143. {
  3144. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3145. // output
  3146. {
  3147. ggml_backend_type backend_norm;
  3148. ggml_backend_type backend_output;
  3149. if (n_gpu_layers > int(n_layer)) {
  3150. backend_norm = llama_backend_offload;
  3151. backend_output = llama_backend_offload;
  3152. } else {
  3153. backend_norm = GGML_BACKEND_CPU;
  3154. backend_output = GGML_BACKEND_CPU;
  3155. }
  3156. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3157. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  3158. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3159. model.output_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, backend_output);
  3160. }
  3161. const uint32_t n_ff = hparams.n_ff;
  3162. const int64_t n_embd_gqa = n_embd_v_gqa;
  3163. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  3164. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  3165. const int i_gpu_start = n_layer - n_gpu_layers;
  3166. model.layers.resize(n_layer);
  3167. for (uint32_t i = 0; i < n_layer; ++i) {
  3168. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3169. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3170. auto & layer = model.layers[i];
  3171. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3172. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  3173. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  3174. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  3175. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3176. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  3177. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  3178. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  3179. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3180. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  3181. }
  3182. } break;
  3183. case LLM_ARCH_PLAMO:
  3184. {
  3185. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3186. // output
  3187. {
  3188. ggml_backend_type backend_norm;
  3189. ggml_backend_type backend_output;
  3190. if (n_gpu_layers > int(n_layer)) {
  3191. backend_norm = llama_backend_offload;
  3192. backend_output = llama_backend_offload_split;
  3193. } else {
  3194. backend_norm = GGML_BACKEND_CPU;
  3195. backend_output = GGML_BACKEND_CPU;
  3196. }
  3197. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3198. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3199. }
  3200. const uint32_t n_ff = hparams.n_ff;
  3201. const int64_t n_embd_gqa = n_embd_v_gqa;
  3202. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  3203. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  3204. const int i_gpu_start = n_layer - n_gpu_layers;
  3205. model.layers.resize(n_layer);
  3206. for (uint32_t i = 0; i < n_layer; ++i) {
  3207. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3208. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3209. auto & layer = model.layers[i];
  3210. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3211. layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
  3212. layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  3213. layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
  3214. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3215. layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
  3216. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
  3217. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3218. }
  3219. } break;
  3220. case LLM_ARCH_GPT2:
  3221. {
  3222. model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
  3223. model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU);
  3224. // output
  3225. {
  3226. ggml_backend_type backend_norm;
  3227. ggml_backend_type backend_output;
  3228. if (n_gpu_layers > int(n_layer)) {
  3229. backend_norm = llama_backend_offload;
  3230. backend_output = llama_backend_offload_split;
  3231. } else {
  3232. backend_norm = GGML_BACKEND_CPU;
  3233. backend_output = GGML_BACKEND_CPU;
  3234. }
  3235. model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
  3236. model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
  3237. model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
  3238. }
  3239. const uint32_t n_ff = hparams.n_ff;
  3240. const int64_t n_embd_gqa = n_embd_v_gqa;
  3241. GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa());
  3242. GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);
  3243. const int i_gpu_start = n_layer - n_gpu_layers;
  3244. model.layers.resize(n_layer);
  3245. for (uint32_t i = 0; i < n_layer; ++i) {
  3246. const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
  3247. const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
  3248. auto & layer = model.layers[i];
  3249. layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
  3250. layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
  3251. layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
  3252. layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
  3253. layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
  3254. layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
  3255. layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
  3256. layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
  3257. layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
  3258. layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
  3259. layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
  3260. layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
  3261. }
  3262. } break;
  3263. default:
  3264. throw std::runtime_error("unknown architecture");
  3265. }
  3266. }
  3267. ml.done_getting_tensors();
  3268. ml.init_mapping();
  3269. // allocate tensors
  3270. size_t vram_weights = 0;
  3271. size_t buf_size = 0;
  3272. ggml_backend_buffer_type_t buft = llama_default_buffer_type(n_gpu_layers);
  3273. for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
  3274. // GGML_BACKEND_GPU tensors are for CUDA and OpenCL only, which are handled separately without ggml-backend
  3275. if (t->backend == GGML_BACKEND_CPU) {
  3276. buf_size += GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), ggml_backend_buft_get_alignment(buft));
  3277. } else {
  3278. vram_weights += ggml_nbytes(t);
  3279. }
  3280. }
  3281. // create backend buffer
  3282. ggml_backend_buffer_t buf_mmap = nullptr;
  3283. #ifdef GGML_USE_METAL
  3284. if (n_gpu_layers > 0) {
  3285. if (ml.use_mmap) {
  3286. const size_t max_size = ggml_get_max_tensor_size(ctx);
  3287. model.buf = ggml_backend_metal_buffer_from_ptr(ml.mapping->addr, ml.mapping->size, max_size);
  3288. buf_mmap = model.buf;
  3289. } else {
  3290. model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_metal_buffer_type());
  3291. }
  3292. }
  3293. #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  3294. // for testing only
  3295. if (n_gpu_layers > 0) {
  3296. model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cuda_buffer_type(0));
  3297. }
  3298. #endif
  3299. if (model.buf == nullptr) {
  3300. // CPU backend, and indirectly CUDA and OpenCL
  3301. if (ml.use_mmap) {
  3302. model.buf = ggml_backend_cpu_buffer_from_ptr(ml.mapping->addr, ml.mapping->size);
  3303. buf_mmap = model.buf;
  3304. } else {
  3305. // allocate only CPU tensors
  3306. model.buf = ggml_backend_buft_alloc_buffer(buft, buf_size);
  3307. ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(model.buf);
  3308. for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
  3309. if (t->backend == GGML_BACKEND_CPU) {
  3310. ggml_tallocr_alloc(alloc, t);
  3311. }
  3312. }
  3313. ggml_tallocr_free(alloc);
  3314. }
  3315. }
  3316. if (use_mlock && ggml_backend_buffer_is_host(model.buf)) {
  3317. model.mlock_buf.init (ggml_backend_buffer_get_base(model.buf));
  3318. model.mlock_buf.grow_to(ggml_backend_buffer_get_size(model.buf));
  3319. }
  3320. // print memory requirements
  3321. {
  3322. size_t sys_mem_required = ctx_size + buf_size;
  3323. if (sys_mem_required > 0) {
  3324. LLAMA_LOG_INFO("%s: system memory used = %7.2f MiB\n", __func__, sys_mem_required / 1024.0 / 1024.0);
  3325. }
  3326. if (vram_weights > 0) {
  3327. LLAMA_LOG_INFO("%s: VRAM used = %7.2f MiB\n", __func__, vram_weights / 1024.0 / 1024.0);
  3328. }
  3329. #if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST)
  3330. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  3331. LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  3332. if (n_gpu_layers > (int) hparams.n_layer) {
  3333. LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
  3334. }
  3335. const int max_backend_supported_layers = hparams.n_layer + 1;
  3336. const int max_offloadable_layers = hparams.n_layer + 1;
  3337. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
3338. #endif // (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST)
  3339. }
  3340. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  3341. ggml_cuda_set_tensor_split(tensor_split);
  3342. #else
  3343. GGML_UNUSED(tensor_split);
  3344. #endif // GGML_USE_CUBLAS
  3345. // populate tensors_by_name
  3346. for (int i = 0; i < ml.n_tensors; ++i) {
  3347. struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i));
  3348. model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
  3349. }
  3350. if (!ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf_mmap, use_mlock ? &model.mlock_mmap : NULL)) {
  3351. return false;
  3352. }
  3353. model.mapping = std::move(ml.mapping);
3354. // loading time will be recalculated after the first eval, so
  3355. // we take page faults deferred by mmap() into consideration
  3356. model.t_load_us = ggml_time_us() - model.t_start_us;
  3357. return true;
  3358. }
  3359. // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
  3360. static int llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) {
  3361. try {
  3362. llama_model_loader ml(fname, params.use_mmap, params.kv_overrides);
  3363. model.hparams.vocab_only = params.vocab_only;
  3364. llm_load_arch (ml, model);
  3365. llm_load_hparams(ml, model);
  3366. llm_load_vocab (ml, model);
  3367. llm_load_print_meta(ml, model);
  3368. if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
  3369. throw std::runtime_error("vocab size mismatch");
  3370. }
  3371. if (params.vocab_only) {
  3372. LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
  3373. return 0;
  3374. }
  3375. if (!llm_load_tensors(
  3376. ml, model, params.n_gpu_layers, params.main_gpu, params.tensor_split, params.use_mlock,
  3377. params.progress_callback, params.progress_callback_user_data
  3378. )) {
  3379. return -2;
  3380. }
  3381. } catch (const std::exception & err) {
  3382. LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
  3383. return -1;
  3384. }
  3385. return 0;
  3386. }
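// Usage sketch (illustrative only, not part of the original file): a caller is
// expected to map the return codes documented above, roughly like this:
//
//   llama_model model;
//   const int status = llama_model_load(fname, model, params);
//   if (status == -1) { /* loading failed                        */ }
//   if (status == -2) { /* cancelled via llama_progress_callback */ }
//   // status == 0: success
//
// The exact call site depends on the surrounding API and is assumed here.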
  3387. //
  3388. // llm_build
  3389. //
  3390. using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;
  3391. enum llm_rope_type {
  3392. LLM_ROPE,
  3393. LLM_ROPE_NEOX,
  3394. LLM_ROPE_GLM,
  3395. };
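// These values are translated into the integer rope "mode" passed to
// ggml_rope_custom_inplace in llm_build_k_shift below:
// LLM_ROPE -> 0, LLM_ROPE_NEOX -> 2, LLM_ROPE_GLM -> 4.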
  3396. enum llm_ffn_op_type {
  3397. LLM_FFN_SILU,
  3398. LLM_FFN_GELU,
  3399. LLM_FFN_RELU,
  3400. LLM_FFN_RELU_SQR,
  3401. };
  3402. enum llm_ffn_gate_type {
  3403. LLM_FFN_SEQ,
  3404. LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
  3405. };
  3406. enum llm_norm_type {
  3407. LLM_NORM,
  3408. LLM_NORM_RMS,
  3409. };
  3410. static struct ggml_tensor * llm_build_inp_embd(
  3411. struct ggml_context * ctx,
  3412. const llama_hparams & hparams,
  3413. const llama_batch & batch,
  3414. struct ggml_tensor * tok_embd,
  3415. const llm_build_cb & cb) {
  3416. const int64_t n_embd = hparams.n_embd;
  3417. struct ggml_tensor * inpL;
  3418. if (batch.token) {
  3419. struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
  3420. cb(inp_tokens, "inp_tokens", -1);
  3421. inpL = ggml_get_rows(ctx, tok_embd, inp_tokens);
  3422. } else {
  3423. #ifdef GGML_USE_MPI
  3424. GGML_ASSERT(false && "not implemented");
  3425. #endif
  3426. inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
  3427. }
  3428. return inpL;
  3429. }
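// Sketch of the two input paths above: for token ids the corresponding embedding
// rows are gathered with ggml_get_rows, while pre-computed embeddings are passed
// through as-is. Either way the result has shape [n_embd, n_tokens] (ggml lists
// the fastest-varying dimension first), which is what every build_*() graph
// below expects as inpL.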
  3430. // Persimmon: n_rot = n_embd_head_k/2
  3431. // Other: n_rot = n_embd_head_k
  3432. static void llm_build_k_shift(
  3433. struct ggml_context * ctx,
  3434. const llama_hparams & hparams,
  3435. const llama_cparams & cparams,
  3436. const llama_kv_cache & kv,
  3437. struct ggml_cgraph * graph,
  3438. llm_rope_type type,
  3439. int64_t n_ctx,
  3440. int n_rot,
  3441. float freq_base,
  3442. float freq_scale,
  3443. const llm_build_cb & cb) {
  3444. const int64_t n_layer = hparams.n_layer;
  3445. const int64_t n_head_kv = hparams.n_head_kv;
  3446. const int64_t n_embd_head_k = hparams.n_embd_head_k;
  3447. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  3448. const int32_t n_orig_ctx = cparams.n_yarn_orig_ctx;
  3449. const float ext_factor = cparams.yarn_ext_factor;
  3450. const float attn_factor = cparams.yarn_attn_factor;
  3451. const float beta_fast = cparams.yarn_beta_fast;
  3452. const float beta_slow = cparams.yarn_beta_slow;
  3453. GGML_ASSERT(n_embd_head_k % n_rot == 0);
  3454. struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_ctx);
  3455. cb(K_shift, "K_shift", -1);
  3456. int rope_type = 0;
  3457. switch (type) {
  3458. case LLM_ROPE: rope_type = 0; break;
  3459. case LLM_ROPE_NEOX: rope_type = 2; break;
  3460. case LLM_ROPE_GLM: rope_type = 4; break;
  3461. }
  3462. for (int il = 0; il < n_layer; ++il) {
  3463. struct ggml_tensor * tmp =
  3464. // we rotate only the first n_rot dimensions
  3465. ggml_rope_custom_inplace(ctx,
  3466. ggml_view_3d(ctx, kv.k_l[il],
  3467. n_embd_head_k, n_head_kv, n_ctx,
  3468. ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
  3469. ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
  3470. 0),
  3471. K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
  3472. ext_factor, attn_factor, beta_fast, beta_slow);
  3473. cb(tmp, "K_shifted", il);
  3474. ggml_build_forward_expand(graph, tmp);
  3475. }
  3476. }
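// In effect, every cached K row is re-rotated by the per-position deltas stored
// in K_shift, so that after the KV cache has been shifted/compacted the RoPE
// phase of the cached keys matches their new positions. Only the first n_rot
// dimensions of each head are rotated, as noted above.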
  3477. static void llm_build_kv_store(
  3478. struct ggml_context * ctx,
  3479. const llama_hparams & hparams,
  3480. const llama_kv_cache & kv,
  3481. struct ggml_cgraph * graph,
  3482. struct ggml_tensor * k_cur,
  3483. struct ggml_tensor * v_cur,
  3484. int64_t n_ctx,
  3485. int32_t n_tokens,
  3486. int32_t kv_head,
  3487. const llm_build_cb & cb,
  3488. int64_t il) {
  3489. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  3490. const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
3491. // compute the transposed [n_tokens, n_embd_v_gqa] V matrix
  3492. struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, n_embd_v_gqa, n_tokens));
  3493. //struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed
  3494. cb(v_cur_t, "v_cur_t", il);
  3495. struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa,
  3496. (ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa))*kv_head);
  3497. cb(k_cache_view, "k_cache_view", il);
  3498. struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa,
  3499. ( n_ctx)*ggml_element_size(kv.v_l[il]),
  3500. (kv_head)*ggml_element_size(kv.v_l[il]));
  3501. cb(v_cache_view, "v_cache_view", il);
  3502. // important: storing RoPE-ed version of K in the KV cache!
  3503. ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
  3504. ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view));
  3505. }
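// Cache layout sketch: per layer, K is stored row-major as [n_embd_k_gqa, n_ctx],
// so the new tokens are appended with a 1D view offset by kv_head positions.
// V is stored transposed, [n_ctx, n_embd_v_gqa], which lets llm_build_kqv() read
// each value dimension as a contiguous row of length n_kv.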
  3506. static struct ggml_tensor * llm_build_norm(
  3507. struct ggml_context * ctx,
  3508. struct ggml_tensor * cur,
  3509. const llama_hparams & hparams,
  3510. struct ggml_tensor * mw,
  3511. struct ggml_tensor * mb,
  3512. llm_norm_type type,
  3513. const llm_build_cb & cb,
  3514. int il) {
  3515. switch (type) {
  3516. case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break;
  3517. case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
  3518. }
  3519. if (mw || mb) {
  3520. cb(cur, "norm", il);
  3521. }
  3522. if (mw) {
  3523. cur = ggml_mul(ctx, cur, mw);
  3524. if (mb) {
  3525. cb(cur, "norm_w", il);
  3526. }
  3527. }
  3528. if (mb) {
  3529. cur = ggml_add(ctx, cur, mb);
  3530. }
  3531. return cur;
  3532. }
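// The two normalizations above, written out (eps values come from hparams):
//
//   LLM_NORM     : y = (x - mean(x)) / sqrt(var(x)    + f_norm_eps)      (LayerNorm)
//   LLM_NORM_RMS : y =  x            / sqrt(mean(x^2) + f_norm_rms_eps)  (RMSNorm)
//
// followed by an optional elementwise scale (mw) and bias (mb).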
  3533. static struct ggml_tensor * llm_build_ffn(
  3534. struct ggml_context * ctx,
  3535. struct ggml_tensor * cur,
  3536. struct ggml_tensor * up,
  3537. struct ggml_tensor * up_b,
  3538. struct ggml_tensor * gate,
  3539. struct ggml_tensor * gate_b,
  3540. struct ggml_tensor * down,
  3541. struct ggml_tensor * down_b,
  3542. struct ggml_tensor * act_scales,
  3543. llm_ffn_op_type type_op,
  3544. llm_ffn_gate_type type_gate,
  3545. const llm_build_cb & cb,
  3546. int il) {
  3547. struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur);
  3548. cb(tmp, "ffn_up", il);
  3549. if (up_b) {
  3550. tmp = ggml_add(ctx, tmp, up_b);
  3551. cb(tmp, "ffn_up_b", il);
  3552. }
  3553. if (gate) {
  3554. switch (type_gate) {
  3555. case LLM_FFN_SEQ:
  3556. {
  3557. cur = ggml_mul_mat(ctx, gate, tmp);
  3558. cb(cur, "ffn_gate", il);
  3559. } break;
  3560. case LLM_FFN_PAR:
  3561. {
  3562. cur = ggml_mul_mat(ctx, gate, cur);
  3563. cb(cur, "ffn_gate", il);
  3564. } break;
  3565. }
  3566. if (gate_b) {
  3567. cur = ggml_add(ctx, cur, gate_b);
  3568. cb(cur, "ffn_gate_b", il);
  3569. }
  3570. } else {
  3571. cur = tmp;
  3572. }
  3573. switch (type_op) {
  3574. case LLM_FFN_SILU:
  3575. {
  3576. cur = ggml_silu(ctx, cur);
  3577. cb(cur, "ffn_silu", il);
  3578. } break;
  3579. case LLM_FFN_GELU:
  3580. {
  3581. cur = ggml_gelu(ctx, cur);
  3582. cb(cur, "ffn_gelu", il);
  3583. if (act_scales != NULL) {
  3584. cur = ggml_div(ctx, cur, act_scales);
  3585. cb(cur, "ffn_act", il);
  3586. }
  3587. } break;
  3588. case LLM_FFN_RELU:
  3589. {
  3590. cur = ggml_relu(ctx, cur);
  3591. cb(cur, "ffn_relu", il);
  3592. } break;
  3593. case LLM_FFN_RELU_SQR:
  3594. {
  3595. cur = ggml_relu(ctx, cur);
  3596. cb(cur, "ffn_relu", il);
  3597. cur = ggml_sqr(ctx, cur);
  3598. cb(cur, "ffn_sqr(relu)", il);
  3599. } break;
  3600. }
  3601. if (type_gate == LLM_FFN_PAR) {
  3602. cur = ggml_mul(ctx, cur, tmp);
  3603. cb(cur, "ffn_gate_par", il);
  3604. }
  3605. cur = ggml_mul_mat(ctx, down, cur);
  3606. if (down_b) {
  3607. cb(cur, "ffn_down", il);
  3608. }
  3609. if (down_b) {
  3610. cur = ggml_add(ctx, cur, down_b);
  3611. }
  3612. return cur;
  3613. }
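// For the common SiLU + parallel-gate case (LLM_FFN_SILU, LLM_FFN_PAR) this
// computes, roughly:
//
//   cur = down( silu(gate(x)) * up(x) )      // SwiGLU-style gated FFN
//
// while LLM_FFN_SEQ feeds the up projection through the gate matrix instead of
// applying the gate in parallel. Biases and act_scales are applied when present.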
  3614. // if max_alibi_bias > 0 then apply ALiBi
  3615. static struct ggml_tensor * llm_build_kqv(
  3616. struct ggml_context * ctx,
  3617. const llama_model & model,
  3618. const llama_hparams & hparams,
  3619. const llama_kv_cache & kv,
  3620. struct ggml_tensor * wo,
  3621. struct ggml_tensor * wo_b,
  3622. struct ggml_tensor * q_cur,
  3623. struct ggml_tensor * kq_mask,
  3624. int64_t n_ctx,
  3625. int32_t n_tokens,
  3626. int32_t n_kv,
  3627. float max_alibi_bias,
  3628. float kq_scale,
  3629. const llm_build_cb & cb,
  3630. int il) {
  3631. const int64_t n_head = hparams.n_head;
  3632. const int64_t n_head_kv = hparams.n_head_kv;
  3633. const int64_t n_embd_head_k = hparams.n_embd_head_k;
  3634. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  3635. const int64_t n_embd_head_v = hparams.n_embd_head_v;
  3636. struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
  3637. cb(q, "q", il);
  3638. struct ggml_tensor * k =
  3639. ggml_view_3d(ctx, kv.k_l[il],
  3640. n_embd_head_k, n_kv, n_head_kv,
  3641. ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
  3642. ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
  3643. 0);
  3644. cb(k, "k", il);
  3645. struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
  3646. cb(kq, "kq", il);
  3647. if (model.arch == LLM_ARCH_PHI2) {
  3648. // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
  3649. // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
  3650. ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
  3651. }
  3652. if (max_alibi_bias > 0.0f) {
  3653. // temporary branch until we figure out how to handle ggml_alibi through ggml_add
  3654. kq = ggml_scale(ctx, kq, kq_scale);
  3655. cb(kq, "kq_scaled", il);
  3656. if (max_alibi_bias > 0.0f) {
  3657. // TODO: n_head or n_head_kv
  3658. // TODO: K-shift is likely not working
  3659. // TODO: change to ggml_add
  3660. kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, max_alibi_bias);
  3661. cb(kq, "kq_scaled_alibi", il);
  3662. }
  3663. kq = ggml_add(ctx, kq, kq_mask);
  3664. cb(kq, "kq_masked", il);
  3665. kq = ggml_soft_max(ctx, kq);
  3666. cb(kq, "kq_soft_max", il);
  3667. } else {
  3668. kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale);
  3669. cb(kq, "kq_soft_max_ext", il);
  3670. }
  3671. // split cached v into n_head heads
  3672. struct ggml_tensor * v =
  3673. ggml_view_3d(ctx, kv.v_l[il],
  3674. n_kv, n_embd_head_v, n_head_kv,
  3675. ggml_element_size(kv.v_l[il])*n_ctx,
  3676. ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v,
  3677. 0);
  3678. cb(v, "v", il);
  3679. struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
  3680. cb(kqv, "kqv", il);
  3681. struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
  3682. cb(kqv_merged, "kqv_merged", il);
  3683. struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_k*n_head, n_tokens);
  3684. cb(cur, "kqv_merged_cont", il);
  3685. cur = ggml_mul_mat(ctx, wo, cur);
  3686. if (wo_b) {
  3687. cb(cur, "kqv_wo", il);
  3688. }
  3689. if (wo_b) {
  3690. cur = ggml_add(ctx, cur, wo_b);
  3691. }
  3692. return cur;
  3693. }
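// Shape/math sketch for the attention above (per head):
//
//   kq  = K^T * Q                                  // [n_kv, n_tokens]
//   kq  = soft_max(kq * kq_scale + kq_mask)        // (or the scaled ALiBi path)
//   kqv = V * kq                                   // [n_embd_head_v, n_tokens]
//
// the heads are then merged back into [n_embd_head_k*n_head, n_tokens] and
// projected through wo (plus wo_b when present).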
  3694. struct llm_build_context {
  3695. const llama_model & model;
  3696. const llama_hparams & hparams;
  3697. const llama_cparams & cparams;
  3698. const llama_batch & batch;
  3699. const llama_kv_cache & kv_self;
  3700. const int64_t n_embd;
  3701. const int64_t n_layer;
  3702. const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
  3703. const int64_t n_head;
  3704. const int64_t n_head_kv;
  3705. const int64_t n_embd_head_k;
  3706. const int64_t n_embd_k_gqa;
  3707. const int64_t n_embd_head_v;
  3708. const int64_t n_embd_v_gqa;
  3709. const int64_t n_expert;
  3710. const int64_t n_expert_used;
  3711. const float freq_base;
  3712. const float freq_scale;
  3713. const float ext_factor;
  3714. const float attn_factor;
  3715. const float beta_fast;
  3716. const float beta_slow;
  3717. const float norm_eps;
  3718. const float norm_rms_eps;
  3719. const int32_t n_tokens;
  3720. const int32_t n_kv; // size of KV cache to consider (n_kv <= n_ctx)
  3721. const int32_t kv_head; // index of where we store new KV data in the cache
  3722. const int32_t n_orig_ctx;
  3723. const bool do_rope_shift;
  3724. const llm_build_cb & cb;
  3725. std::vector<uint8_t> & buf_compute_meta;
  3726. struct ggml_context * ctx0 = nullptr;
  3727. // TODO: consider making the entire interface noexcept
  3728. llm_build_context(
  3729. llama_context & lctx,
  3730. const llama_batch & batch,
  3731. const llm_build_cb & cb,
  3732. bool worst_case) :
  3733. model (lctx.model),
  3734. hparams (model.hparams),
  3735. cparams (lctx.cparams),
  3736. batch (batch),
  3737. kv_self (lctx.kv_self),
  3738. n_embd (hparams.n_embd),
  3739. n_layer (hparams.n_layer),
  3740. n_ctx (cparams.n_ctx),
  3741. n_head (hparams.n_head),
  3742. n_head_kv (hparams.n_head_kv),
  3743. n_embd_head_k (hparams.n_embd_head_k),
  3744. n_embd_k_gqa (hparams.n_embd_k_gqa()),
  3745. n_embd_head_v (hparams.n_embd_head_v),
  3746. n_embd_v_gqa (hparams.n_embd_v_gqa()),
  3747. n_expert (hparams.n_expert),
  3748. n_expert_used (hparams.n_expert_used),
  3749. freq_base (cparams.rope_freq_base),
  3750. freq_scale (cparams.rope_freq_scale),
  3751. ext_factor (cparams.yarn_ext_factor),
  3752. attn_factor (cparams.yarn_attn_factor),
  3753. beta_fast (cparams.yarn_beta_fast),
  3754. beta_slow (cparams.yarn_beta_slow),
  3755. norm_eps (hparams.f_norm_eps),
  3756. norm_rms_eps (hparams.f_norm_rms_eps),
  3757. n_tokens (batch.n_tokens),
  3758. n_kv (worst_case ? n_ctx : kv_self.n),
  3759. kv_head (worst_case ? n_ctx - n_tokens : kv_self.head),
  3760. n_orig_ctx (cparams.n_yarn_orig_ctx),
  3761. do_rope_shift (worst_case || kv_self.has_shift),
  3762. cb (cb),
  3763. buf_compute_meta (lctx.buf_compute_meta) {
  3764. GGML_ASSERT(!!kv_self.ctx);
  3765. // all initializations should be done in init()
  3766. }
  3767. void init() {
  3768. struct ggml_init_params params = {
  3769. /*.mem_size =*/ buf_compute_meta.size(),
  3770. /*.mem_buffer =*/ buf_compute_meta.data(),
  3771. /*.no_alloc =*/ true,
  3772. };
  3773. ctx0 = ggml_init(params);
  3774. }
  3775. void free() {
  3776. if (ctx0) {
  3777. ggml_free(ctx0);
  3778. ctx0 = nullptr;
  3779. }
  3780. }
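// Typical lifecycle of this helper, as a rough sketch of how the graph-building
// code (e.g. llama_build_graph, later in the file) drives it:
//
//   llm_build_context llm(lctx, batch, cb, worst_case);
//   llm.init();                                    // creates the no_alloc ggml context
//   struct ggml_cgraph * gf = llm.build_llama();   // or another build_*() below
//   llm.free();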
  3781. struct ggml_cgraph * build_llama() {
  3782. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3783. const int64_t n_embd_head = hparams.n_embd_head_v;
  3784. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  3785. GGML_ASSERT(n_embd_head == hparams.n_rot);
  3786. struct ggml_tensor * cur;
  3787. struct ggml_tensor * inpL;
  3788. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3789. cb(inpL, "inp_embd", -1);
  3790. // inp_pos - contains the positions
  3791. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3792. cb(inp_pos, "inp_pos", -1);
  3793. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3794. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3795. cb(KQ_mask, "KQ_mask", -1);
  3796. // shift the entire K-cache if needed
  3797. if (do_rope_shift) {
  3798. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3799. }
  3800. for (int il = 0; il < n_layer; ++il) {
  3801. struct ggml_tensor * inpSA = inpL;
  3802. // norm
  3803. cur = llm_build_norm(ctx0, inpL, hparams,
  3804. model.layers[il].attn_norm, NULL,
  3805. LLM_NORM_RMS, cb, il);
  3806. cb(cur, "attn_norm", il);
  3807. // self-attention
  3808. {
  3809. // compute Q and K and RoPE them
  3810. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  3811. cb(Qcur, "Qcur", il);
  3812. if (model.layers[il].bq) {
  3813. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  3814. cb(Qcur, "Qcur", il);
  3815. }
  3816. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  3817. cb(Kcur, "Kcur", il);
  3818. if (model.layers[il].bk) {
  3819. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  3820. cb(Kcur, "Kcur", il);
  3821. }
  3822. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  3823. cb(Vcur, "Vcur", il);
  3824. if (model.layers[il].bv) {
  3825. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  3826. cb(Vcur, "Vcur", il);
  3827. }
  3828. Qcur = ggml_rope_custom(
  3829. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  3830. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3831. ext_factor, attn_factor, beta_fast, beta_slow
  3832. );
  3833. cb(Qcur, "Qcur", il);
  3834. Kcur = ggml_rope_custom(
  3835. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  3836. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3837. ext_factor, attn_factor, beta_fast, beta_slow
  3838. );
  3839. cb(Kcur, "Kcur", il);
  3840. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3841. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  3842. model.layers[il].wo, model.layers[il].bo,
  3843. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  3844. cb(cur, "kqv_out", il);
  3845. }
  3846. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3847. cb(ffn_inp, "ffn_inp", il);
  3848. // feed-forward network
  3849. if (model.layers[il].ffn_gate_inp == nullptr) {
  3850. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3851. model.layers[il].ffn_norm, NULL,
  3852. LLM_NORM_RMS, cb, il);
  3853. cb(cur, "ffn_norm", il);
  3854. cur = llm_build_ffn(ctx0, cur,
  3855. model.layers[il].ffn_up, NULL,
  3856. model.layers[il].ffn_gate, NULL,
  3857. model.layers[il].ffn_down, NULL,
  3858. NULL,
  3859. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  3860. cb(cur, "ffn_out", il);
  3861. } else {
  3862. // MoE branch
  3863. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3864. model.layers[il].ffn_norm, NULL,
  3865. LLM_NORM_RMS, cb, il);
  3866. cb(cur, "ffn_norm", il);
  3867. ggml_tensor * logits = ggml_mul_mat(ctx0, model.layers[il].ffn_gate_inp, cur); // [n_tokens, num_experts]
  3868. cb(logits, "ffn_moe_logits", il);
  3869. ggml_tensor * probs = ggml_soft_max(ctx0, logits); // [n_tokens, num_experts]
  3870. cb(probs, "ffn_moe_probs", il);
  3871. // select experts
  3872. ggml_tensor * selected_experts = ggml_top_k(ctx0, probs, n_expert_used); // [n_tokens, num_experts_per_tok]
  3873. cb(selected_experts->src[0], "ffn_moe_argsort", il);
  3874. ggml_tensor * weights = ggml_get_rows(ctx0,
  3875. ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts);
  3876. cb(weights, "ffn_moe_weights", il);
  3877. weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); // [n_tokens, num_experts_per_tok]
  3878. ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights);
  3879. cb(weights_sum, "ffn_moe_weights_sum", il);
  3880. weights = ggml_div(ctx0, weights, weights_sum); // [n_tokens, num_experts_per_tok]
  3881. cb(weights, "ffn_moe_weights_norm", il);
  3882. // compute expert outputs
  3883. ggml_tensor * moe_out = nullptr;
  3884. for (int i = 0; i < n_expert_used; ++i) {
  3885. ggml_tensor * cur_expert;
  3886. ggml_tensor * cur_up = ggml_mul_mat_id(ctx0, model.layers[il].ffn_up_exp, n_expert, selected_experts, i, cur);
  3887. cb(cur_up, "ffn_moe_up", il);
  3888. ggml_tensor * cur_gate = ggml_mul_mat_id(ctx0, model.layers[il].ffn_gate_exp, n_expert, selected_experts, i, cur);
  3889. cb(cur_gate, "ffn_moe_gate", il);
  3890. cur_gate = ggml_silu(ctx0, cur_gate);
  3891. cb(cur_gate, "ffn_moe_silu", il);
  3892. cur_expert = ggml_mul(ctx0, cur_up, cur_gate); // [n_tokens, n_embd]
  3893. cb(cur_expert, "ffn_moe_gate_par", il);
  3894. cur_expert = ggml_mul_mat_id(ctx0, model.layers[il].ffn_down_exp, n_expert, selected_experts, i, cur_expert); // [n_tokens, n_embd]
  3895. cb(cur_expert, "ffn_moe_down", il);
  3896. cur_expert = ggml_mul(ctx0, cur_expert,
  3897. ggml_view_2d(ctx0, weights, 1, n_tokens, weights->nb[1], i*weights->nb[0]));
  3898. cb(cur_expert, "ffn_moe_weighted", il);
  3899. if (i == 0) {
  3900. moe_out = cur_expert;
  3901. } else {
  3902. moe_out = ggml_add(ctx0, moe_out, cur_expert);
  3903. cb(moe_out, "ffn_moe_out", il);
  3904. }
  3905. }
  3906. cur = moe_out;
  3907. }
  3908. cur = ggml_add(ctx0, cur, ffn_inp);
  3909. cb(cur, "l_out", il);
  3910. // input for next layer
  3911. inpL = cur;
  3912. }
  3913. cur = inpL;
  3914. cur = llm_build_norm(ctx0, cur, hparams,
  3915. model.output_norm, NULL,
  3916. LLM_NORM_RMS, cb, -1);
  3917. cb(cur, "result_norm", -1);
  3918. // lm_head
  3919. cur = ggml_mul_mat(ctx0, model.output, cur);
  3920. cb(cur, "result_output", -1);
  3921. ggml_build_forward_expand(gf, cur);
  3922. return gf;
  3923. }
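// Per-layer structure built above, in brief:
//
//   h   = x + attn(rms_norm(x))             // self-attention block
//   out = h + ffn_or_moe(rms_norm(h))       // dense FFN or MoE branch
//
// where the MoE branch routes each token to n_expert_used of n_expert experts
// and sums their outputs weighted by the normalized router probabilities.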
  3924. struct ggml_cgraph * build_baichuan() {
  3925. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  3926. const int64_t n_embd_head = hparams.n_embd_head_v;
  3927. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  3928. struct ggml_tensor * cur;
  3929. struct ggml_tensor * inpL;
  3930. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  3931. cb(inpL, "inp_embd", -1);
  3932. // inp_pos - contains the positions
  3933. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  3934. cb(inp_pos, "inp_pos", -1);
  3935. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  3936. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  3937. cb(KQ_mask, "KQ_mask", -1);
  3938. // shift the entire K-cache if needed
  3939. if (do_rope_shift) {
  3940. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  3941. }
  3942. for (int il = 0; il < n_layer; ++il) {
  3943. struct ggml_tensor * inpSA = inpL;
  3944. cur = llm_build_norm(ctx0, inpL, hparams,
  3945. model.layers[il].attn_norm, NULL,
  3946. LLM_NORM_RMS, cb, il);
  3947. cb(cur, "attn_norm", il);
  3948. // self-attention
  3949. {
  3950. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  3951. cb(Qcur, "Qcur", il);
  3952. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  3953. cb(Kcur, "Kcur", il);
  3954. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  3955. cb(Vcur, "Vcur", il);
  3956. switch (model.type) {
  3957. case MODEL_7B:
  3958. Qcur = ggml_rope_custom(
  3959. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  3960. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3961. ext_factor, attn_factor, beta_fast, beta_slow
  3962. );
  3963. Kcur = ggml_rope_custom(
  3964. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  3965. n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
  3966. ext_factor, attn_factor, beta_fast, beta_slow
  3967. );
  3968. break;
  3969. case MODEL_13B:
  3970. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
  3971. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
  3972. break;
  3973. default:
  3974. GGML_ASSERT(false);
  3975. }
  3976. cb(Qcur, "Qcur", il);
  3977. cb(Kcur, "Kcur", il);
  3978. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  3979. // apply ALiBi for 13B model
  3980. const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f;
  3981. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  3982. model.layers[il].wo, NULL,
  3983. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  3984. cb(cur, "kqv_out", il);
  3985. }
  3986. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  3987. cb(ffn_inp, "ffn_inp", il);
  3988. // feed-forward network
  3989. {
  3990. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  3991. model.layers[il].ffn_norm, NULL,
  3992. LLM_NORM_RMS, cb, il);
  3993. cb(cur, "ffn_norm", il);
  3994. cur = llm_build_ffn(ctx0, cur,
  3995. model.layers[il].ffn_up, NULL,
  3996. model.layers[il].ffn_gate, NULL,
  3997. model.layers[il].ffn_down, NULL,
  3998. NULL,
  3999. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4000. cb(cur, "ffn_out", il);
  4001. }
  4002. cur = ggml_add(ctx0, cur, ffn_inp);
  4003. cb(cur, "l_out", il);
  4004. // input for next layer
  4005. inpL = cur;
  4006. }
  4007. cur = inpL;
  4008. cur = llm_build_norm(ctx0, cur, hparams,
  4009. model.output_norm, NULL,
  4010. LLM_NORM_RMS, cb, -1);
  4011. cb(cur, "result_norm", -1);
  4012. // lm_head
  4013. cur = ggml_mul_mat(ctx0, model.output, cur);
  4014. cb(cur, "result_output", -1);
  4015. ggml_build_forward_expand(gf, cur);
  4016. return gf;
  4017. }
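// Note: the two Baichuan variants differ only in positional handling above:
// MODEL_7B applies RoPE to Q/K, while MODEL_13B skips RoPE and relies on ALiBi
// (max_alibi_bias = 8.0f) inside llm_build_kqv.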
  4018. struct ggml_cgraph * build_falcon() {
  4019. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4020. const int64_t n_embd_head = hparams.n_embd_head_v;
  4021. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4022. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4023. struct ggml_tensor * cur;
  4024. struct ggml_tensor * inpL;
  4025. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4026. cb(inpL, "inp_embd", -1);
  4027. // inp_pos - contains the positions
  4028. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4029. cb(inp_pos, "inp_pos", -1);
  4030. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4031. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4032. cb(KQ_mask, "KQ_mask", -1);
  4033. // shift the entire K-cache if needed
  4034. if (do_rope_shift) {
  4035. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4036. }
  4037. for (int il = 0; il < n_layer; ++il) {
  4038. struct ggml_tensor * attn_norm;
  4039. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  4040. model.layers[il].attn_norm,
  4041. model.layers[il].attn_norm_b,
  4042. LLM_NORM, cb, il);
  4043. cb(attn_norm, "attn_norm", il);
  4044. // self-attention
  4045. {
  4046. if (model.layers[il].attn_norm_2) {
  4047. // Falcon-40B
  4048. cur = llm_build_norm(ctx0, inpL, hparams,
  4049. model.layers[il].attn_norm_2,
  4050. model.layers[il].attn_norm_2_b,
  4051. LLM_NORM, cb, il);
  4052. cb(cur, "attn_norm_2", il);
  4053. } else {
  4054. cur = attn_norm;
  4055. }
  4056. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4057. cb(cur, "wqkv", il);
  4058. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4059. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4060. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4061. cb(Qcur, "Qcur", il);
  4062. cb(Kcur, "Kcur", il);
  4063. cb(Vcur, "Vcur", il);
  4064. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4065. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4066. // using mode = 2 for neox mode
  4067. Qcur = ggml_rope_custom(
  4068. ctx0, Qcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  4069. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4070. );
  4071. cb(Qcur, "Qcur", il);
  4072. Kcur = ggml_rope_custom(
  4073. ctx0, Kcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  4074. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4075. );
  4076. cb(Kcur, "Kcur", il);
  4077. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4078. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4079. model.layers[il].wo, NULL,
  4080. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4081. cb(cur, "kqv_out", il);
  4082. }
  4083. struct ggml_tensor * ffn_inp = cur;
  4084. // feed forward
  4085. {
  4086. cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result
  4087. model.layers[il].ffn_up, NULL,
  4088. NULL, NULL,
  4089. model.layers[il].ffn_down, NULL,
  4090. NULL,
  4091. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4092. cb(cur, "ffn_out", il);
  4093. }
  4094. cur = ggml_add(ctx0, cur, ffn_inp);
  4095. cb(cur, "l_out", il);
  4096. cur = ggml_add(ctx0, cur, inpL);
  4097. cb(cur, "l_out", il);
  4098. // input for next layer
  4099. inpL = cur;
  4100. }
  4101. cur = inpL;
  4102. // norm
  4103. cur = llm_build_norm(ctx0, cur, hparams,
  4104. model.output_norm,
  4105. model.output_norm_b,
  4106. LLM_NORM, cb, -1);
  4107. cb(cur, "result_norm", -1);
  4108. cur = ggml_mul_mat(ctx0, model.output, cur);
  4109. cb(cur, "result_output", -1);
  4110. ggml_build_forward_expand(gf, cur);
  4111. return gf;
  4112. }
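// Falcon uses a parallel residual layout: the FFN is fed from attn_norm (not
// from the attention output), and the layer result is inpL + attn_out + ffn_out,
// which is what the two consecutive ggml_add() calls above implement.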
  4113. struct ggml_cgraph * build_starcoder() {
  4114. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4115. const int64_t n_embd_head = hparams.n_embd_head_v;
  4116. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4117. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4118. struct ggml_tensor * cur;
  4119. struct ggml_tensor * pos;
  4120. struct ggml_tensor * inpL;
  4121. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4122. cb(inpL, "inp_embd", -1);
  4123. // inp_pos - contains the positions
  4124. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4125. cb(inp_pos, "inp_pos", -1);
  4126. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4127. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4128. cb(KQ_mask, "KQ_mask", -1);
  4129. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  4130. cb(pos, "pos_embd", -1);
  4131. inpL = ggml_add(ctx0, inpL, pos);
  4132. cb(inpL, "inpL", -1);
  4133. for (int il = 0; il < n_layer; ++il) {
  4134. cur = llm_build_norm(ctx0, inpL, hparams,
  4135. model.layers[il].attn_norm,
  4136. model.layers[il].attn_norm_b,
  4137. LLM_NORM, cb, il);
  4138. cb(cur, "attn_norm", il);
  4139. // self-attention
  4140. {
  4141. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4142. cb(cur, "wqkv", il);
  4143. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4144. cb(cur, "bqkv", il);
  4145. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4146. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4147. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4148. cb(Qcur, "Qcur", il);
  4149. cb(Kcur, "Kcur", il);
  4150. cb(Vcur, "Vcur", il);
  4151. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4152. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4153. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4154. model.layers[il].wo, model.layers[il].bo,
  4155. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4156. cb(cur, "kqv_out", il);
  4157. }
  4158. // add the input
  4159. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4160. cb(ffn_inp, "ffn_inp", il);
  4161. // FF
  4162. {
  4163. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4164. model.layers[il].ffn_norm,
  4165. model.layers[il].ffn_norm_b,
  4166. LLM_NORM, cb, il);
  4167. cb(cur, "ffn_norm", il);
  4168. cur = llm_build_ffn(ctx0, cur,
  4169. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4170. NULL, NULL,
  4171. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4172. NULL,
  4173. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4174. cb(cur, "ffn_out", il);
  4175. }
  4176. inpL = ggml_add(ctx0, cur, ffn_inp);
  4177. cb(inpL, "l_out", il);
  4178. }
  4179. cur = llm_build_norm(ctx0, inpL, hparams,
  4180. model.output_norm,
  4181. model.output_norm_b,
  4182. LLM_NORM, cb, -1);
  4183. cb(cur, "result_norm", -1);
  4184. cur = ggml_mul_mat(ctx0, model.output, cur);
  4185. cb(cur, "result_output", -1);
  4186. ggml_build_forward_expand(gf, cur);
  4187. return gf;
  4188. }
  4189. struct ggml_cgraph * build_persimmon() {
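// Persimmon graph: the fused QKV output is split per head, Q and K get their own layernorm,
// and only the first n_rot (= head_dim/2) elements of each head are rotated; the remaining half
// is passed through unchanged and concatenated back after RoPE.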
  4190. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4191. const int64_t n_embd_head = hparams.n_embd_head_v;
  4192. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4193. const int64_t n_rot = n_embd_head_k / 2;
  4194. struct ggml_tensor * cur;
  4195. struct ggml_tensor * inpL;
  4196. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
4197. cb(inpL, "inp_embd", -1);
  4198. // inp_pos - contains the positions
  4199. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4200. cb(inp_pos, "inp_pos", -1);
  4201. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4202. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4203. cb(KQ_mask, "KQ_mask", -1);
  4204. if (do_rope_shift) {
  4205. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4206. }
  4207. for (int il = 0; il < n_layer; ++il) {
  4208. struct ggml_tensor * residual = inpL;
  4209. cur = llm_build_norm(ctx0, inpL, hparams,
  4210. model.layers[il].attn_norm,
  4211. model.layers[il].attn_norm_b,
  4212. LLM_NORM, cb, il);
  4213. cb(cur, "attn_norm", il);
  4214. // self attention
  4215. {
  4216. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4217. cb(cur, "wqkv", il);
  4218. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4219. cb(cur, "bqkv", il);
  4220. // split qkv
  4221. GGML_ASSERT(n_head_kv == n_head);
  4222. struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens);
  4223. cb(tmpqkv, "tmpqkv", il);
  4224. struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2));
  4225. cb(tmpqkv_perm, "tmpqkv", il);
  4226. struct ggml_tensor * tmpq = ggml_view_3d(
  4227. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4228. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4229. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4230. 0
  4231. );
  4232. cb(tmpq, "tmpq", il);
  4233. struct ggml_tensor * tmpk = ggml_view_3d(
  4234. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4235. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4236. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4237. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens
  4238. );
  4239. cb(tmpk, "tmpk", il);
  4240. // Q/K Layernorm
  4241. tmpq = llm_build_norm(ctx0, tmpq, hparams,
  4242. model.layers[il].attn_q_norm,
  4243. model.layers[il].attn_q_norm_b,
  4244. LLM_NORM, cb, il);
  4245. cb(tmpq, "tmpq", il);
  4246. tmpk = llm_build_norm(ctx0, tmpk, hparams,
  4247. model.layers[il].attn_k_norm,
  4248. model.layers[il].attn_k_norm_b,
  4249. LLM_NORM, cb, il);
  4250. cb(tmpk, "tmpk", il);
  4251. // RoPE the first n_rot of q/k, pass the other half, and concat.
  4252. struct ggml_tensor * qrot = ggml_view_3d(
  4253. ctx0, tmpq, n_rot, n_head, n_tokens,
  4254. ggml_element_size(tmpq) * n_embd_head,
  4255. ggml_element_size(tmpq) * n_embd_head * n_head,
  4256. 0
  4257. );
  4258. cb(qrot, "qrot", il);
  4259. struct ggml_tensor * krot = ggml_view_3d(
  4260. ctx0, tmpk, n_rot, n_head, n_tokens,
  4261. ggml_element_size(tmpk) * n_embd_head,
  4262. ggml_element_size(tmpk) * n_embd_head * n_head,
  4263. 0
  4264. );
  4265. cb(krot, "krot", il);
4266. // get the second half of tmpq, i.e. tmpq[n_rot:, :, :]
  4267. struct ggml_tensor * qpass = ggml_view_3d(
  4268. ctx0, tmpq, n_rot, n_head, n_tokens,
  4269. ggml_element_size(tmpq) * n_embd_head,
  4270. ggml_element_size(tmpq) * n_embd_head * n_head,
  4271. ggml_element_size(tmpq) * n_rot
  4272. );
  4273. cb(qpass, "qpass", il);
  4274. struct ggml_tensor * kpass = ggml_view_3d(
  4275. ctx0, tmpk, n_rot, n_head, n_tokens,
  4276. ggml_element_size(tmpk) * n_embd_head,
  4277. ggml_element_size(tmpk) * n_embd_head * n_head,
  4278. ggml_element_size(tmpk) * n_rot
  4279. );
  4280. cb(kpass, "kpass", il);
  4281. struct ggml_tensor * qrotated = ggml_rope_custom(
  4282. ctx0, qrot, inp_pos, n_rot, 2, 0, n_orig_ctx,
  4283. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4284. );
  4285. cb(qrotated, "qrotated", il);
  4286. struct ggml_tensor * krotated = ggml_rope_custom(
  4287. ctx0, krot, inp_pos, n_rot, 2, 0, n_orig_ctx,
  4288. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4289. );
  4290. cb(krotated, "krotated", il);
  4291. // ggml currently only supports concatenation on dim=2
  4292. // so we need to permute qrot, qpass, concat, then permute back.
  4293. qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3));
  4294. cb(qrotated, "qrotated", il);
  4295. krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3));
  4296. cb(krotated, "krotated", il);
  4297. qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3));
  4298. cb(qpass, "qpass", il);
  4299. kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3));
  4300. cb(kpass, "kpass", il);
  4301. struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass);
  4302. cb(Qcur, "Qcur", il);
  4303. struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
  4304. cb(Kcur, "Kcur", il);
  4305. struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3));
  4306. cb(Q, "Q", il);
  4307. Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
  4308. cb(Kcur, "Kcur", il);
  4309. struct ggml_tensor * Vcur = ggml_view_3d(
  4310. ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
  4311. ggml_element_size(tmpqkv_perm) * n_embd_head,
  4312. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
  4313. ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2
  4314. );
  4315. cb(Vcur, "Vcur", il);
  4316. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4317. // TODO: not tested, could be broken
  4318. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4319. model.layers[il].wo, model.layers[il].bo,
  4320. Q, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4321. cb(cur, "kqv_out", il);
  4322. }
  4323. struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
  4324. cb(ffn_inp, "ffn_inp", il);
  4325. // feed-forward network
  4326. {
  4327. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4328. model.layers[il].ffn_norm,
  4329. model.layers[il].ffn_norm_b,
  4330. LLM_NORM, cb, il);
  4331. cb(cur, "ffn_norm", il);
  4332. cur = llm_build_ffn(ctx0, cur,
  4333. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4334. NULL, NULL,
  4335. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4336. NULL,
  4337. LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);
  4338. cb(cur, "ffn_out", il);
  4339. }
  4340. cur = ggml_add(ctx0, cur, ffn_inp);
  4341. cb(cur, "l_out", il);
  4342. inpL = cur;
  4343. }
  4344. cur = inpL;
  4345. cur = llm_build_norm(ctx0, cur, hparams,
  4346. model.output_norm,
  4347. model.output_norm_b,
  4348. LLM_NORM, cb, -1);
  4349. cb(cur, "result_norm", -1);
  4350. cur = ggml_mul_mat(ctx0, model.output, cur);
  4351. cb(cur, "result_output", -1);
  4352. ggml_build_forward_expand(gf, cur);
  4353. return gf;
  4354. }
  4355. struct ggml_cgraph * build_refact() {
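// Refact graph: RMS-norm, no positional input tensor and no RoPE; positions are encoded via
// ALiBi (max_alibi_bias = 8.0f passed to llm_build_kqv), with a gated SiLU feed-forward block.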
  4356. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4357. const int64_t n_embd_head = hparams.n_embd_head_v;
  4358. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4359. struct ggml_tensor * cur;
  4360. struct ggml_tensor * inpL;
  4361. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4362. cb(inpL, "inp_embd", -1);
  4363. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4364. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4365. cb(KQ_mask, "KQ_mask", -1);
  4366. for (int il = 0; il < n_layer; ++il) {
  4367. struct ggml_tensor * inpSA = inpL;
  4368. cur = llm_build_norm(ctx0, inpL, hparams,
  4369. model.layers[il].attn_norm, NULL,
  4370. LLM_NORM_RMS, cb, il);
  4371. cb(cur, "attn_norm", il);
  4372. // self-attention
  4373. {
  4374. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4375. cb(Qcur, "Qcur", il);
  4376. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4377. cb(Kcur, "Kcur", il);
  4378. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4379. cb(Vcur, "Vcur", il);
  4380. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4381. cb(Kcur, "Kcur", il);
  4382. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4383. cb(Qcur, "Qcur", il);
  4384. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4385. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4386. model.layers[il].wo, NULL,
  4387. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4388. cb(cur, "kqv_out", il);
  4389. }
  4390. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4391. cb(ffn_inp, "ffn_inp", il);
  4392. // feed-forward network
  4393. {
  4394. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4395. model.layers[il].ffn_norm, NULL,
  4396. LLM_NORM_RMS, cb, il);
  4397. cb(cur, "ffn_norm", il);
  4398. cur = llm_build_ffn(ctx0, cur,
  4399. model.layers[il].ffn_up, NULL,
  4400. model.layers[il].ffn_gate, NULL,
  4401. model.layers[il].ffn_down, NULL,
  4402. NULL,
  4403. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4404. cb(cur, "ffn_out", il);
  4405. }
  4406. cur = ggml_add(ctx0, cur, ffn_inp);
  4407. cb(cur, "l_out", il);
  4408. // input for next layer
  4409. inpL = cur;
  4410. }
  4411. cur = inpL;
  4412. cur = llm_build_norm(ctx0, cur, hparams,
  4413. model.output_norm, NULL,
  4414. LLM_NORM_RMS, cb, -1);
  4415. cb(cur, "result_norm", -1);
  4416. // lm_head
  4417. cur = ggml_mul_mat(ctx0, model.output, cur);
  4418. cb(cur, "result_output", -1);
  4419. ggml_build_forward_expand(gf, cur);
  4420. return gf;
  4421. }
  4422. struct ggml_cgraph * build_bloom() {
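// BLOOM graph: the token embeddings are layer-normalized (tok_norm) before the first block,
// positions are encoded via ALiBi (max_alibi_bias = 8.0f), attention uses a fused QKV projection
// with biases, and the feed-forward block is a sequential GELU MLP.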
  4423. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4424. const int64_t n_embd_head = hparams.n_embd_head_v;
  4425. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4426. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4427. struct ggml_tensor * cur;
  4428. struct ggml_tensor * inpL;
  4429. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4430. cb(inpL, "inp_embd", -1);
  4431. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4432. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4433. cb(KQ_mask, "KQ_mask", -1);
  4434. inpL = llm_build_norm(ctx0, inpL, hparams,
  4435. model.tok_norm,
  4436. model.tok_norm_b,
  4437. LLM_NORM, cb, -1);
  4438. cb(inpL, "inp_norm", -1);
  4439. for (int il = 0; il < n_layer; ++il) {
  4440. cur = llm_build_norm(ctx0, inpL, hparams,
  4441. model.layers[il].attn_norm,
  4442. model.layers[il].attn_norm_b,
  4443. LLM_NORM, cb, il);
  4444. cb(cur, "attn_norm", il);
  4445. // self-attention
  4446. {
  4447. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4448. cb(cur, "wqkv", il);
  4449. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4450. cb(cur, "bqkv", il);
  4451. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4452. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4453. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4454. cb(Qcur, "Qcur", il);
  4455. cb(Kcur, "Kcur", il);
  4456. cb(Vcur, "Vcur", il);
  4457. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4458. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4459. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4460. model.layers[il].wo, model.layers[il].bo,
  4461. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4462. cb(cur, "kqv_out", il);
  4463. }
  4464. // Add the input
  4465. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4466. cb(ffn_inp, "ffn_inp", il);
  4467. // FF
  4468. {
  4469. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4470. model.layers[il].ffn_norm,
  4471. model.layers[il].ffn_norm_b,
  4472. LLM_NORM, cb, il);
  4473. cb(cur, "ffn_norm", il);
  4474. cur = llm_build_ffn(ctx0, cur,
  4475. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4476. NULL, NULL,
  4477. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4478. NULL,
  4479. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4480. cb(cur, "ffn_out", il);
  4481. }
  4482. inpL = ggml_add(ctx0, cur, ffn_inp);
  4483. cb(inpL, "l_out", il);
  4484. }
  4485. cur = llm_build_norm(ctx0, inpL, hparams,
  4486. model.output_norm,
  4487. model.output_norm_b,
  4488. LLM_NORM, cb, -1);
  4489. cb(cur, "result_norm", -1);
  4490. cur = ggml_mul_mat(ctx0, model.output, cur);
  4491. cb(cur, "result_output", -1);
  4492. ggml_build_forward_expand(gf, cur);
  4493. return gf;
  4494. }
  4495. struct ggml_cgraph * build_mpt() {
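// MPT graph: norms without bias, fused QKV projection with optional clamping (f_clamp_kqv),
// ALiBi attention bias taken from hparams.f_max_alibi_bias, and a GELU feed-forward block
// with an optional ffn_act tensor.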
  4496. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4497. const int64_t n_embd_head = hparams.n_embd_head_v;
  4498. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4499. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4500. struct ggml_tensor * cur;
  4501. struct ggml_tensor * inpL;
  4502. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4503. cb(inpL, "inp_embd", -1);
  4504. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4505. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4506. cb(KQ_mask, "KQ_mask", -1);
  4507. for (int il = 0; il < n_layer; ++il) {
  4508. struct ggml_tensor * attn_norm;
  4509. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  4510. model.layers[il].attn_norm,
  4511. NULL,
  4512. LLM_NORM, cb, il);
  4513. cb(attn_norm, "attn_norm", il);
  4514. // self-attention
  4515. {
  4516. cur = attn_norm;
  4517. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4518. cb(cur, "wqkv", il);
  4519. if (hparams.f_clamp_kqv > 0.0f) {
  4520. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  4521. cb(cur, "wqkv_clamped", il);
  4522. }
  4523. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4524. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4525. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4526. cb(Qcur, "Qcur", il);
  4527. cb(Kcur, "Kcur", il);
  4528. cb(Vcur, "Vcur", il);
  4529. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4530. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4531. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4532. model.layers[il].wo, NULL,
  4533. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4534. cb(cur, "kqv_out", il);
  4535. }
  4536. // Add the input
  4537. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4538. cb(ffn_inp, "ffn_inp", il);
  4539. // feed forward
  4540. {
  4541. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4542. model.layers[il].ffn_norm,
  4543. NULL,
  4544. LLM_NORM, cb, il);
  4545. cb(cur, "ffn_norm", il);
  4546. cur = llm_build_ffn(ctx0, cur,
  4547. model.layers[il].ffn_up, NULL,
  4548. NULL, NULL,
  4549. model.layers[il].ffn_down, NULL,
  4550. model.layers[il].ffn_act,
  4551. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4552. cb(cur, "ffn_out", il);
  4553. }
  4554. cur = ggml_add(ctx0, cur, ffn_inp);
  4555. cb(cur, "l_out", il);
  4556. // input for next layer
  4557. inpL = cur;
  4558. }
  4559. cur = inpL;
  4560. cur = llm_build_norm(ctx0, cur, hparams,
  4561. model.output_norm,
  4562. NULL,
  4563. LLM_NORM, cb, -1);
  4564. cb(cur, "result_norm", -1);
  4565. cur = ggml_mul_mat(ctx0, model.output, cur);
  4566. cb(cur, "result_output", -1);
  4567. ggml_build_forward_expand(gf, cur);
  4568. return gf;
  4569. }
  4570. struct ggml_cgraph * build_stablelm() {
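// StableLM graph: layernorm with biases, partial NeoX-style RoPE over hparams.n_rot dimensions
// of each head, and a gated SiLU feed-forward block.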
  4571. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  4572. const int64_t n_embd_head = hparams.n_embd_head_v;
  4573. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4574. struct ggml_tensor * cur;
  4575. struct ggml_tensor * inpL;
  4576. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4577. cb(inpL, "inp_embd", -1);
  4578. // inp_pos - contains the positions
  4579. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4580. cb(inp_pos, "inp_pos", -1);
  4581. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4582. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4583. cb(KQ_mask, "KQ_mask", -1);
  4584. // shift the entire K-cache if needed
  4585. if (do_rope_shift) {
  4586. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, hparams.n_rot, freq_base, freq_scale, cb);
  4587. }
  4588. for (int il = 0; il < n_layer; ++il) {
  4589. struct ggml_tensor * inpSA = inpL;
  4590. // norm
  4591. cur = llm_build_norm(ctx0, inpL, hparams,
  4592. model.layers[il].attn_norm,
  4593. model.layers[il].attn_norm_b,
  4594. LLM_NORM, cb, il);
  4595. cb(cur, "attn_norm", il);
  4596. // self-attention
  4597. {
  4598. // compute Q and K and RoPE them
  4599. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4600. cb(Qcur, "Qcur", il);
  4601. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4602. cb(Kcur, "Kcur", il);
  4603. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4604. cb(Vcur, "Vcur", il);
  4605. Qcur = ggml_rope_custom(
  4606. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  4607. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4608. ext_factor, attn_factor, beta_fast, beta_slow
  4609. );
  4610. cb(Qcur, "Qcur", il);
  4611. Kcur = ggml_rope_custom(
  4612. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  4613. hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4614. ext_factor, attn_factor, beta_fast, beta_slow
  4615. );
  4616. cb(Kcur, "Kcur", il);
  4617. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4618. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4619. model.layers[il].wo, NULL,
  4620. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4621. cb(cur, "kqv_out", il);
  4622. }
  4623. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4624. cb(ffn_inp, "ffn_inp", il);
  4625. // feed-forward network
  4626. {
  4627. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4628. model.layers[il].ffn_norm,
  4629. model.layers[il].ffn_norm_b,
  4630. LLM_NORM, cb, il);
  4631. cb(cur, "ffn_norm", il);
  4632. cur = llm_build_ffn(ctx0, cur,
  4633. model.layers[il].ffn_up, NULL,
  4634. model.layers[il].ffn_gate, NULL,
  4635. model.layers[il].ffn_down, NULL,
  4636. NULL,
  4637. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4638. cb(cur, "ffn_out", il);
  4639. }
  4640. cur = ggml_add(ctx0, cur, ffn_inp);
  4641. cb(cur, "l_out", il);
  4642. // input for next layer
  4643. inpL = cur;
  4644. }
  4645. cur = inpL;
  4646. cur = llm_build_norm(ctx0, cur, hparams,
  4647. model.output_norm,
  4648. model.output_norm_b,
  4649. LLM_NORM, cb, -1);
  4650. cb(cur, "result_norm", -1);
  4651. // lm_head
  4652. cur = ggml_mul_mat(ctx0, model.output, cur);
  4653. cb(cur, "result_output", -1);
  4654. ggml_build_forward_expand(gf, cur);
  4655. return gf;
  4656. }
  4657. struct ggml_cgraph * build_qwen() {
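// Qwen graph: RMS-norm, fused QKV projection with bias where the Q, K and V views are each
// n_embd wide, NeoX-style RoPE over the full head dimension, and a gated SiLU feed-forward block.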
  4658. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4659. const int64_t n_embd_head = hparams.n_embd_head_v;
  4660. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4661. struct ggml_tensor * cur;
  4662. struct ggml_tensor * inpL;
  4663. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4664. cb(inpL, "inp_embd", -1);
  4665. // inp_pos - contains the positions
  4666. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4667. cb(inp_pos, "inp_pos", -1);
  4668. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4669. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4670. cb(KQ_mask, "KQ_mask", -1);
  4671. // shift the entire K-cache if needed
  4672. if (do_rope_shift) {
  4673. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4674. }
  4675. for (int il = 0; il < n_layer; ++il) {
  4676. struct ggml_tensor * inpSA = inpL;
  4677. cur = llm_build_norm(ctx0, inpL, hparams,
  4678. model.layers[il].attn_norm, NULL,
  4679. LLM_NORM_RMS, cb, il);
  4680. cb(cur, "attn_norm", il);
  4681. // self-attention
  4682. {
  4683. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4684. cb(cur, "wqkv", il);
  4685. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4686. cb(cur, "bqkv", il);
  4687. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4688. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4689. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
  4690. cb(Qcur, "Qcur", il);
  4691. cb(Kcur, "Kcur", il);
  4692. cb(Vcur, "Vcur", il);
  4693. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4694. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4695. // using mode = 2 for neox mode
  4696. Qcur = ggml_rope_custom(
  4697. ctx0, Qcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  4698. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4699. );
  4700. cb(Qcur, "Qcur", il);
  4701. Kcur = ggml_rope_custom(
  4702. ctx0, Kcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
  4703. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4704. );
  4705. cb(Kcur, "Kcur", il);
  4706. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4707. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4708. model.layers[il].wo, NULL,
  4709. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4710. cb(cur, "kqv_out", il);
  4711. }
  4712. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  4713. cb(ffn_inp, "ffn_inp", il);
4714. // feed-forward network
  4715. {
  4716. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4717. model.layers[il].ffn_norm, NULL,
  4718. LLM_NORM_RMS, cb, il);
  4719. cb(cur, "ffn_norm", il);
  4720. cur = llm_build_ffn(ctx0, cur,
  4721. model.layers[il].ffn_up, NULL,
  4722. model.layers[il].ffn_gate, NULL,
  4723. model.layers[il].ffn_down, NULL,
  4724. NULL,
  4725. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4726. cb(cur, "ffn_out", il);
  4727. }
  4728. cur = ggml_add(ctx0, cur, ffn_inp);
  4729. cb(cur, "l_out", il);
  4730. // input for next layer
  4731. inpL = cur;
  4732. }
  4733. cur = inpL;
  4734. cur = llm_build_norm(ctx0, cur, hparams,
  4735. model.output_norm, NULL,
  4736. LLM_NORM_RMS, cb, -1);
  4737. cb(cur, "result_norm", -1);
  4738. // lm_head
  4739. cur = ggml_mul_mat(ctx0, model.output, cur);
  4740. cb(cur, "result_output", -1);
  4741. ggml_build_forward_expand(gf, cur);
  4742. return gf;
  4743. }
  4744. struct ggml_cgraph * build_phi2() {
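// Phi-2 graph: attention and feed-forward branches run in parallel, both reading the same
// layer-normalized input (attn_norm_output); Q is pre-scaled by 1/sqrt(n_embd_head) so the
// KQ scale passed to llm_build_kqv is 1.0f, and the output head carries an additional bias.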
  4745. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4746. const int64_t n_embd_head = hparams.n_embd_head_v;
  4747. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4748. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4749. struct ggml_tensor * cur;
  4750. struct ggml_tensor * attn_norm_output;
  4751. struct ggml_tensor * ffn_output;
  4752. struct ggml_tensor * inpL;
  4753. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4754. cb(inpL, "inp_embd", -1);
  4755. // inp_pos - contains the positions
  4756. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4757. cb(inp_pos, "inp_pos", -1);
  4758. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4759. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4760. cb(KQ_mask, "KQ_mask", -1);
  4761. // shift the entire K-cache if needed
  4762. if (do_rope_shift) {
  4763. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4764. }
  4765. for (int il = 0; il < n_layer; ++il) {
  4766. attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
  4767. model.layers[il].attn_norm,
  4768. model.layers[il].attn_norm_b,
  4769. LLM_NORM, cb, il);
  4770. cb(attn_norm_output, "attn_norm", il);
  4771. // self-attention
  4772. {
  4773. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
  4774. cb(cur, "wqkv", il);
  4775. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4776. cb(cur, "bqkv", il);
  4777. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4778. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4779. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4780. cb(Qcur, "Qcur", il);
  4781. cb(Kcur, "Kcur", il);
  4782. cb(Vcur, "Vcur", il);
  4783. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4784. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  4785. Qcur = ggml_rope_custom(
  4786. ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4787. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4788. );
  4789. cb(Qcur, "Qcur", il);
  4790. // with phi2, we scale the Q to avoid precision issues
  4791. // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
  4792. Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
  4793. cb(Qcur, "Qcur", il);
  4794. Kcur = ggml_rope_custom(
  4795. ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
  4796. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  4797. );
  4798. cb(Kcur, "Kcur", il);
  4799. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4800. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4801. model.layers[il].wo, model.layers[il].bo,
  4802. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f, cb, il);
  4803. cb(cur, "kqv_out", il);
  4804. }
  4805. // FF
  4806. {
  4807. ffn_output = llm_build_ffn(ctx0, attn_norm_output,
  4808. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4809. NULL, NULL,
  4810. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4811. NULL,
  4812. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4813. cb(ffn_output, "ffn_out", il);
  4814. }
  4815. cur = ggml_add(ctx0, cur, ffn_output);
  4816. cb(cur, "l_out", il);
  4817. cur = ggml_add(ctx0, cur, inpL);
  4818. cb(cur, "l_out", il);
  4819. inpL = cur;
  4820. }
  4821. cur = llm_build_norm(ctx0, inpL, hparams,
  4822. model.output_norm,
  4823. model.output_norm_b,
  4824. LLM_NORM, cb, -1);
  4825. cb(cur, "result_norm", -1);
  4826. cur = ggml_mul_mat(ctx0, model.output, cur);
  4827. cb(cur, "result_output_no_bias", -1);
  4828. cur = ggml_add(ctx0, cur, model.output_b);
  4829. cb(cur, "result_output", -1);
  4830. ggml_build_forward_expand(gf, cur);
  4831. return gf;
  4832. }
  4833. struct ggml_cgraph * build_plamo() {
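// PLaMo graph: attention and feed-forward branches are computed in parallel from the same
// RMS-normalized input; their outputs are summed and added to the layer input as a single residual.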
  4834. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  4835. const int64_t n_embd_head = hparams.n_embd_head_v;
  4836. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4837. struct ggml_tensor * cur;
  4838. struct ggml_tensor * inpL;
  4839. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4840. cb(inpL, "inp_embd", -1);
  4841. // inp_pos - contains the positions
  4842. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4843. cb(inp_pos, "inp_pos", -1);
  4844. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4845. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4846. cb(KQ_mask, "KQ_mask", -1);
  4847. // shift the entire K-cache if needed
  4848. if (do_rope_shift) {
  4849. llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
  4850. }
  4851. for (int il = 0; il < n_layer; ++il) {
  4852. // norm
  4853. cur = llm_build_norm(ctx0, inpL, hparams,
  4854. model.layers[il].attn_norm, NULL,
  4855. LLM_NORM_RMS, cb, il);
  4856. cb(cur, "attn_norm", il);
  4857. struct ggml_tensor * attention_norm = cur;
  4858. // self-attention
  4859. {
  4860. // compute Q and K and RoPE them
  4861. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  4862. cb(Qcur, "Qcur", il);
  4863. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  4864. cb(Kcur, "Kcur", il);
  4865. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  4866. cb(Vcur, "Vcur", il);
  4867. Qcur = ggml_rope_custom(
  4868. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
  4869. n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4870. ext_factor, attn_factor, beta_fast, beta_slow);
  4871. cb(Qcur, "Qcur", il);
  4872. Kcur = ggml_rope_custom(
  4873. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
  4874. n_embd_head, 2, 0, n_orig_ctx, freq_base, freq_scale,
  4875. ext_factor, attn_factor, beta_fast, beta_slow);
  4876. cb(Kcur, "Kcur", il);
  4877. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4878. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4879. model.layers[il].wo, NULL,
  4880. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4881. cb(cur, "kqv_out", il);
  4882. }
  4883. struct ggml_tensor * sa_out = cur;
  4884. cur = attention_norm;
  4885. // feed-forward network
  4886. {
  4887. cur = llm_build_ffn(ctx0, cur,
  4888. model.layers[il].ffn_up, NULL,
  4889. model.layers[il].ffn_gate, NULL,
  4890. model.layers[il].ffn_down, NULL,
  4891. NULL,
  4892. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  4893. cb(cur, "ffn_out", il);
  4894. }
  4895. cur = ggml_add(ctx0, cur, sa_out);
  4896. cb(cur, "l_out", il);
  4897. cur = ggml_add(ctx0, cur, inpL);
  4898. cb(cur, "l_out", il);
  4899. // input for next layer
  4900. inpL = cur;
  4901. }
  4902. cur = inpL;
  4903. cur = llm_build_norm(ctx0, cur, hparams,
  4904. model.output_norm, NULL,
  4905. LLM_NORM_RMS, cb, -1);
  4906. cb(cur, "result_norm", -1);
  4907. // lm_head
  4908. cur = ggml_mul_mat(ctx0, model.output, cur);
  4909. cb(cur, "result_output", -1);
  4910. ggml_build_forward_expand(gf, cur);
  4911. return gf;
  4912. }
  4913. struct ggml_cgraph * build_gpt2() {
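// GPT-2 graph: learned absolute position embeddings, pre-layernorm blocks with biases,
// fused QKV projection with bias, and a sequential GELU feed-forward block with biases.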
  4914. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  4915. const int64_t n_embd_head = hparams.n_embd_head_v;
  4916. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  4917. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  4918. struct ggml_tensor * cur;
  4919. struct ggml_tensor * pos;
  4920. struct ggml_tensor * inpL;
  4921. inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
  4922. cb(inpL, "inp_embd", -1);
  4923. // inp_pos - contains the positions
  4924. struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  4925. cb(inp_pos, "inp_pos", -1);
  4926. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  4927. struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
  4928. cb(KQ_mask, "KQ_mask", -1);
  4929. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  4930. cb(pos, "pos_embd", -1);
  4931. inpL = ggml_add(ctx0, inpL, pos);
  4932. cb(inpL, "inpL", -1);
  4933. for (int il = 0; il < n_layer; ++il) {
  4934. cur = llm_build_norm(ctx0, inpL, hparams,
  4935. model.layers[il].attn_norm,
  4936. model.layers[il].attn_norm_b,
  4937. LLM_NORM, cb, il);
  4938. cb(cur, "attn_norm", il);
  4939. // self-attention
  4940. {
  4941. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  4942. cb(cur, "wqkv", il);
  4943. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  4944. cb(cur, "bqkv", il);
  4945. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  4946. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  4947. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  4948. cb(Qcur, "Qcur", il);
  4949. cb(Kcur, "Kcur", il);
  4950. cb(Vcur, "Vcur", il);
  4951. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  4952. llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
  4953. cur = llm_build_kqv(ctx0, model, hparams, kv_self,
  4954. model.layers[il].wo, model.layers[il].bo,
  4955. Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  4956. cb(cur, "kqv_out", il);
  4957. }
  4958. // add the input
  4959. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  4960. cb(ffn_inp, "ffn_inp", il);
  4961. // FF
  4962. {
  4963. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  4964. model.layers[il].ffn_norm,
  4965. model.layers[il].ffn_norm_b,
  4966. LLM_NORM, cb, il);
  4967. cb(cur, "ffn_norm", il);
  4968. cur = llm_build_ffn(ctx0, cur,
  4969. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  4970. NULL, NULL,
  4971. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  4972. NULL,
  4973. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  4974. cb(cur, "ffn_out", il);
  4975. }
  4976. inpL = ggml_add(ctx0, cur, ffn_inp);
  4977. cb(inpL, "l_out", il);
  4978. }
  4979. cur = llm_build_norm(ctx0, inpL, hparams,
  4980. model.output_norm,
  4981. model.output_norm_b,
  4982. LLM_NORM, cb, -1);
  4983. cb(cur, "result_norm", -1);
  4984. cur = ggml_mul_mat(ctx0, model.output, cur);
  4985. cb(cur, "result_output", -1);
  4986. ggml_build_forward_expand(gf, cur);
  4987. return gf;
  4988. }
  4989. };
  4990. //
  4991. // tensor offloading helpers
  4992. //
  4993. // TODO: will be removed with backend v2
  4994. enum llm_offload_func_e {
  4995. OFFLOAD_FUNC_NOP,
  4996. OFFLOAD_FUNC,
  4997. OFFLOAD_FUNC_FRC, // force offload
  4998. OFFLOAD_FUNC_KQV,
  4999. OFFLOAD_FUNC_NR,
  5000. OFFLOAD_FUNC_EMB, // embeddings
  5001. OFFLOAD_FUNC_OUT,
  5002. };
  5003. // TODO: will be removed with backend v2
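// byte-wise trie keyed on tensor base names: add() inserts a name -> offload function mapping,
// find() walks the name one byte at a time and returns OFFLOAD_FUNC_NOP when there is no match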
  5004. struct llm_offload_trie {
  5005. struct node {
  5006. ~node() {
  5007. for (int i = 0; i < 256; ++i) {
  5008. if (children[i]) {
  5009. delete children[i];
  5010. }
  5011. }
  5012. }
  5013. node * children[256] = { nullptr };
  5014. llm_offload_func_e func = OFFLOAD_FUNC_NOP;
  5015. };
  5016. llm_offload_trie() {
  5017. root = new node;
  5018. }
  5019. llm_offload_trie(const std::unordered_map<const char *, llm_offload_func_e> & map) {
  5020. root = new node;
  5021. for (const auto & kv : map) {
  5022. add(kv.first, kv.second);
  5023. }
  5024. }
  5025. ~llm_offload_trie() {
  5026. delete root;
  5027. }
  5028. void add(const char * name, llm_offload_func_e func) {
  5029. node * cur = root;
  5030. for (int i = 0; ; ++i) {
  5031. const uint8_t c = name[i];
  5032. if (!c) {
  5033. break;
  5034. }
  5035. if (!cur->children[c]) {
  5036. cur->children[c] = new node;
  5037. }
  5038. cur = cur->children[c];
  5039. }
  5040. cur->func = func;
  5041. }
  5042. llm_offload_func_e find(const char * name) const {
  5043. const node * cur = root;
  5044. for (int i = 0; ; ++i) {
  5045. const uint8_t c = name[i];
  5046. if (!c) {
  5047. break;
  5048. }
  5049. if (!cur->children[c]) {
  5050. return OFFLOAD_FUNC_NOP;
  5051. }
  5052. cur = cur->children[c];
  5053. }
  5054. return cur->func;
  5055. }
  5056. node * root = nullptr;
  5057. };
  5058. // TODO: will be removed with backend v2
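// NOTE: the keys are string-literal pointers; the map is only iterated once to populate
// k_offload_func_trie below, so the pointer-based hashing of const char * keys is not an issue here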
  5059. static const std::unordered_map<const char *, llm_offload_func_e> k_offload_map = {
  5060. //{ "inp_tokens", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
  5061. //{ "inp_embd", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
  5062. { "pos_embd", OFFLOAD_FUNC_NR },
  5063. { "inp_pos", OFFLOAD_FUNC_FRC }, // this is often used for KQ ops (e.g. rope)
  5064. { "KQ_mask", OFFLOAD_FUNC_FRC },
  5065. { "K_shift", OFFLOAD_FUNC_FRC },
  5066. { "K_shifted", OFFLOAD_FUNC },
  5067. { "inp_norm", OFFLOAD_FUNC_NR },
  5068. { "inp_norm_w", OFFLOAD_FUNC_NR },
  5069. { "inp_norm_wb", OFFLOAD_FUNC_NR },
  5070. { "norm", OFFLOAD_FUNC },
  5071. { "norm_w", OFFLOAD_FUNC },
  5072. { "norm_wb", OFFLOAD_FUNC },
  5073. { "attn_norm", OFFLOAD_FUNC },
  5074. { "attn_norm_2", OFFLOAD_FUNC },
  5075. { "wqkv", OFFLOAD_FUNC_KQV },
  5076. { "bqkv", OFFLOAD_FUNC_KQV },
  5077. { "wqkv_clamped", OFFLOAD_FUNC_KQV },
  5078. { "tmpk", OFFLOAD_FUNC_KQV },
  5079. { "tmpq", OFFLOAD_FUNC_KQV },
  5080. { "tmpv", OFFLOAD_FUNC_KQV },
  5081. { "Kcur", OFFLOAD_FUNC_KQV },
  5082. { "Qcur", OFFLOAD_FUNC_KQV },
  5083. { "Vcur", OFFLOAD_FUNC_KQV },
  5084. { "krot", OFFLOAD_FUNC_KQV },
  5085. { "qrot", OFFLOAD_FUNC_KQV },
  5086. { "kpass", OFFLOAD_FUNC_KQV },
  5087. { "qpass", OFFLOAD_FUNC_KQV },
  5088. { "krotated", OFFLOAD_FUNC_KQV },
  5089. { "qrotated", OFFLOAD_FUNC_KQV },
  5090. { "q", OFFLOAD_FUNC_KQV },
  5091. { "k", OFFLOAD_FUNC_KQV },
  5092. { "kq", OFFLOAD_FUNC_KQV },
  5093. { "kq_scaled", OFFLOAD_FUNC_KQV },
  5094. { "kq_scaled_alibi", OFFLOAD_FUNC_KQV },
  5095. { "kq_masked", OFFLOAD_FUNC_KQV },
  5096. { "kq_soft_max", OFFLOAD_FUNC_KQV },
  5097. { "kq_soft_max_ext", OFFLOAD_FUNC_KQV },
  5098. { "v", OFFLOAD_FUNC_KQV },
  5099. { "kqv", OFFLOAD_FUNC_KQV },
  5100. { "kqv_merged", OFFLOAD_FUNC_KQV },
  5101. { "kqv_merged_cont", OFFLOAD_FUNC_KQV },
  5102. { "kqv_wo", OFFLOAD_FUNC_KQV },
  5103. { "kqv_out", OFFLOAD_FUNC_KQV },
  5104. { "ffn_inp", OFFLOAD_FUNC },
  5105. { "ffn_norm", OFFLOAD_FUNC },
  5106. { "ffn_up", OFFLOAD_FUNC },
  5107. { "ffn_up_b", OFFLOAD_FUNC },
  5108. { "ffn_gate", OFFLOAD_FUNC },
  5109. { "ffn_gate_b", OFFLOAD_FUNC },
  5110. { "ffn_gate_par", OFFLOAD_FUNC },
  5111. { "ffn_act", OFFLOAD_FUNC },
  5112. { "ffn_down", OFFLOAD_FUNC },
  5113. { "ffn_down_b", OFFLOAD_FUNC },
  5114. { "ffn_out", OFFLOAD_FUNC },
  5115. { "ffn_silu", OFFLOAD_FUNC },
  5116. { "ffn_gelu", OFFLOAD_FUNC },
  5117. { "ffn_relu", OFFLOAD_FUNC },
  5118. { "ffn_sqr(relu)", OFFLOAD_FUNC },
  5119. { "ffn_moe_logits", OFFLOAD_FUNC },
  5120. { "ffn_moe_probs", OFFLOAD_FUNC },
  5121. { "ffn_moe_argsort", OFFLOAD_FUNC },
  5122. { "ffn_moe_weights", OFFLOAD_FUNC },
  5123. { "ffn_moe_weights_sum", OFFLOAD_FUNC },
  5124. { "ffn_moe_weights_norm", OFFLOAD_FUNC },
  5125. { "ffn_moe_weighted", OFFLOAD_FUNC },
  5126. { "ffn_moe_up", OFFLOAD_FUNC },
  5127. { "ffn_moe_gate", OFFLOAD_FUNC },
  5128. { "ffn_moe_silu", OFFLOAD_FUNC },
  5129. { "ffn_moe_gate_par", OFFLOAD_FUNC },
  5130. { "ffn_moe_down", OFFLOAD_FUNC },
  5131. { "ffn_moe_out", OFFLOAD_FUNC },
  5132. { "l_out", OFFLOAD_FUNC },
  5133. { "result_norm", OFFLOAD_FUNC_EMB },
  5134. { "result_output_no_bias", OFFLOAD_FUNC_EMB },
  5135. { "result_output", OFFLOAD_FUNC_OUT },
  5136. };
  5137. static llm_offload_trie k_offload_func_trie(k_offload_map);
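// build the ggml compute graph for the given batch; the callback below names each tensor,
// allocates and fills the input tensors, and applies the (temporary) offloading policy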
  5138. static struct ggml_cgraph * llama_build_graph(
  5139. llama_context & lctx,
  5140. const llama_batch & batch) {
  5141. const auto & model = lctx.model;
  5142. // check if we should build the worst-case graph (for memory measurement)
  5143. const bool worst_case = ggml_allocr_is_measure(lctx.alloc);
  5144. // keep track of the input that has already been allocated
  5145. bool alloc_inp_tokens = false;
  5146. bool alloc_inp_embd = false;
  5147. bool alloc_inp_pos = false;
  5148. bool alloc_inp_KQ_mask = false;
  5149. bool alloc_inp_K_shift = false;
  5150. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  5151. const bool do_offload = true;
  5152. #else
  5153. const bool do_offload = true; // TODO: set to false after finishing refactoring
  5154. #endif
  5155. int n_non_view = 0; // number of non-view tensors that have been processed by the callback
  5156. // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
  5157. // TODO: will be removed with backend v2
  5158. llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
  5159. if (il >= 0) {
  5160. ggml_format_name(cur, "%s-%d", name, il);
  5161. } else {
  5162. ggml_set_name(cur, name);
  5163. }
  5164. //
  5165. // allocate input tensors and set input data
  5166. //
  5167. // TODO: will be removed with backend v2
  5168. if (!alloc_inp_tokens && strcmp(name, "inp_tokens") == 0) {
  5169. ggml_allocr_alloc(lctx.alloc, cur);
  5170. if (!ggml_allocr_is_measure(lctx.alloc) && batch.token) {
  5171. const int64_t n_tokens = cur->ne[0];
  5172. ggml_backend_tensor_set(cur, batch.token, 0, n_tokens*ggml_element_size(cur));
  5173. }
  5174. alloc_inp_tokens = true;
  5175. }
  5176. if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0) {
  5177. ggml_allocr_alloc(lctx.alloc, cur);
  5178. if (!ggml_allocr_is_measure(lctx.alloc) && batch.embd) {
  5179. const int64_t n_embd = cur->ne[0];
  5180. const int64_t n_tokens = cur->ne[1];
  5181. ggml_backend_tensor_set(cur, batch.embd, 0, n_tokens*n_embd*ggml_element_size(cur));
  5182. }
  5183. alloc_inp_embd = true;
  5184. }
  5185. if (!alloc_inp_pos && strcmp(name, "inp_pos") == 0) {
  5186. ggml_allocr_alloc(lctx.alloc, cur);
  5187. if (!ggml_allocr_is_measure(lctx.alloc) && batch.pos) {
  5188. const int64_t n_tokens = cur->ne[0];
  5189. static_assert(std::is_same<llama_pos, int32_t>::value, "llama_pos must be int32_t");
  5190. ggml_backend_tensor_set(cur, batch.pos, 0, n_tokens*ggml_element_size(cur));
  5191. }
  5192. alloc_inp_pos = true;
  5193. }
  5194. if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) {
  5195. ggml_allocr_alloc(lctx.alloc, cur);
  5196. if (!ggml_allocr_is_measure(lctx.alloc)) {
  5197. const int64_t n_kv = cur->ne[0];
  5198. const int64_t n_tokens = cur->ne[1];
  5199. float * data;
  5200. if (ggml_backend_buffer_is_host(cur->buffer)) {
  5201. data = (float *) cur->data;
  5202. } else {
  5203. lctx.buf_copy.resize(ggml_nbytes(cur));
  5204. data = (float *) lctx.buf_copy.data();
  5205. }
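// fill a causal mask per token: KV cells that do not belong to the token's sequence,
// or whose position is in the future (cell.pos > token pos), are masked with -INFINITY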
  5206. for (int h = 0; h < 1; ++h) {
  5207. for (int j = 0; j < n_tokens; ++j) {
  5208. const llama_pos pos = batch.pos[j];
  5209. const llama_seq_id seq_id = batch.seq_id[j][0];
  5210. for (int i = 0; i < n_kv; ++i) {
  5211. float f;
  5212. if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
  5213. f = -INFINITY;
  5214. } else {
  5215. f = 0;
  5216. }
  5217. data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
  5218. }
  5219. }
  5220. }
  5221. if (data != cur->data) {
  5222. ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));
  5223. }
  5224. }
  5225. alloc_inp_KQ_mask = true;
  5226. }
  5227. if (!alloc_inp_K_shift && strcmp(name, "K_shift") == 0) {
  5228. ggml_allocr_alloc(lctx.alloc, cur);
  5229. if (!ggml_allocr_is_measure(lctx.alloc)) {
  5230. const int64_t n_ctx = cur->ne[0];
  5231. int32_t * data;
  5232. if (ggml_backend_buffer_is_host(cur->buffer)) {
  5233. data = (int32_t *) cur->data;
  5234. } else {
  5235. lctx.buf_copy.resize(ggml_nbytes(cur));
  5236. data = (int32_t *) lctx.buf_copy.data();
  5237. }
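// copy the per-cell position deltas accumulated by K-cache shifts; these drive the
// RoPE correction applied by the K-shift graph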
  5238. for (int i = 0; i < n_ctx; ++i) {
  5239. data[i] = lctx.kv_self.cells[i].delta;
  5240. }
  5241. if (data != cur->data) {
  5242. ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));
  5243. }
  5244. }
  5245. alloc_inp_K_shift = true;
  5246. }
  5247. // view tensors are not processed further
  5248. if (cur->view_src != nullptr) {
  5249. return;
  5250. }
  5251. if (cur->op != GGML_OP_NONE) {
  5252. n_non_view++;
  5253. }
  5254. //
  5255. // offload layers
  5256. //
  5257. // TODO: will be removed with backend v2
  5258. //#define LLAMA_OFFLOAD_DEBUG
  5259. if (!do_offload) {
  5260. return;
  5261. }
  5262. const int n_layer = model.hparams.n_layer;
  5263. const int n_gpu_layers = model.n_gpu_layers;
  5264. const int i_gpu_start = n_layer - n_gpu_layers;
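// layers with il >= i_gpu_start are offloaded; tensors from earlier layers are kept on the CPU
// by downgrading their offload function to OFFLOAD_FUNC_NOP below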
  5265. // should we offload the final norm? yes if we are not computing embeddings
  5266. const bool offload_emb = lctx.embedding.empty();
  5267. static const std::unordered_map<llm_offload_func_e, std::string, std::hash<int>> k_offload_func_name = {
  5268. { OFFLOAD_FUNC_NOP, "CPU" },
  5269. { OFFLOAD_FUNC_OUT, "CPU" },
  5270. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  5271. { OFFLOAD_FUNC, "GPU (CUDA)" },
  5272. { OFFLOAD_FUNC_FRC, "GPU (CUDA) FRC" },
  5273. { OFFLOAD_FUNC_KQV, "GPU (CUDA) KQV" },
  5274. { OFFLOAD_FUNC_NR, "GPU (CUDA) NR" },
  5275. { OFFLOAD_FUNC_EMB, "GPU (CUDA) EMB" },
  5276. #else
  5277. { OFFLOAD_FUNC, "CPU" },
  5278. { OFFLOAD_FUNC_FRC, "CPU" },
  5279. { OFFLOAD_FUNC_KQV, "CPU" },
  5280. { OFFLOAD_FUNC_NR, "CPU" },
  5281. { OFFLOAD_FUNC_EMB, "CPU" },
  5282. #endif // GGML_USE_CUBLAS
  5283. };
  5284. // check the global map for what offload function to use for this tensor
  5285. llm_offload_func_e func_e = k_offload_func_trie.find(name);
  5286. if (func_e == OFFLOAD_FUNC_NOP) {
  5287. #ifdef LLAMA_OFFLOAD_DEBUG
  5288. // if a tensor hasn't been offloaded, we warn the user
  5289. if (worst_case) {
  5290. LLAMA_LOG_WARN("%s: %32s: not offloaded (ref: %s)\n", __func__,
  5291. cur->name, "https://github.com/ggerganov/llama.cpp/pull/3837");
  5292. }
  5293. #endif
  5294. return;
  5295. }
5296. // decide whether to offload this tensor, respecting the provided n_gpu_layers
  5297. switch (func_e) {
  5298. case OFFLOAD_FUNC_NOP:
  5299. case OFFLOAD_FUNC_OUT:
  5300. break;
  5301. case OFFLOAD_FUNC:
  5302. if (n_gpu_layers < n_layer) {
  5303. if (il < i_gpu_start) {
  5304. func_e = OFFLOAD_FUNC_NOP;
  5305. }
  5306. }
  5307. break;
  5308. case OFFLOAD_FUNC_FRC:
  5309. if (!lctx.cparams.offload_kqv) {
  5310. func_e = OFFLOAD_FUNC_NOP;
  5311. } break;
  5312. case OFFLOAD_FUNC_KQV:
  5313. if (!lctx.cparams.offload_kqv) {
  5314. func_e = OFFLOAD_FUNC_NOP;
  5315. } else {
  5316. if (n_gpu_layers < n_layer) {
  5317. if (il < i_gpu_start) {
  5318. func_e = OFFLOAD_FUNC_NOP;
  5319. }
  5320. }
  5321. }
  5322. break;
  5323. case OFFLOAD_FUNC_NR:
  5324. if (n_gpu_layers <= n_layer + 0) {
  5325. func_e = OFFLOAD_FUNC_NOP;
  5326. }
  5327. break;
  5328. case OFFLOAD_FUNC_EMB:
  5329. if (!offload_emb || n_gpu_layers < n_layer) {
  5330. func_e = OFFLOAD_FUNC_NOP;
  5331. }
  5332. break;
  5333. default: GGML_ASSERT(false);
  5334. }
  5335. offload_func_t func = ggml_offload_nop;
  5336. // this is needed for compatibility with Metal for example
  5337. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  5338. static offload_func_t ggml_offload_gpu = ggml_cuda_assign_buffers_no_alloc;
  5339. #else
  5340. static offload_func_t ggml_offload_gpu = ggml_offload_nop;
  5341. #endif
  5342. switch (func_e) {
  5343. case OFFLOAD_FUNC_NOP:
  5344. case OFFLOAD_FUNC_OUT: func = ggml_offload_nop; break;
  5345. case OFFLOAD_FUNC:
  5346. case OFFLOAD_FUNC_KQV:
  5347. case OFFLOAD_FUNC_FRC:
  5348. case OFFLOAD_FUNC_NR:
  5349. case OFFLOAD_FUNC_EMB: func = ggml_offload_gpu; break;
  5350. default: GGML_ASSERT(false);
  5351. }
  5352. // apply offload function to the tensor
  5353. func(cur);
  5354. #ifdef LLAMA_OFFLOAD_DEBUG
  5355. if (worst_case) {
  5356. LLAMA_LOG_INFO("%s: %32s: %s\n", __func__, cur->name, k_offload_func_name.at(func_e).c_str());
  5357. }
  5358. #endif
  5359. };
  5360. struct ggml_cgraph * result = NULL;
  5361. struct llm_build_context llm(lctx, batch, cb, worst_case);
  5362. llm.init();
  5363. switch (model.arch) {
  5364. case LLM_ARCH_LLAMA:
  5365. {
  5366. result = llm.build_llama();
  5367. } break;
  5368. case LLM_ARCH_BAICHUAN:
  5369. {
  5370. result = llm.build_baichuan();
  5371. } break;
  5372. case LLM_ARCH_FALCON:
  5373. {
  5374. result = llm.build_falcon();
  5375. } break;
  5376. case LLM_ARCH_STARCODER:
  5377. {
  5378. result = llm.build_starcoder();
  5379. } break;
  5380. case LLM_ARCH_PERSIMMON:
  5381. {
  5382. result = llm.build_persimmon();
  5383. } break;
  5384. case LLM_ARCH_REFACT:
  5385. {
  5386. result = llm.build_refact();
  5387. } break;
  5388. case LLM_ARCH_BLOOM:
  5389. {
  5390. result = llm.build_bloom();
  5391. } break;
  5392. case LLM_ARCH_MPT:
  5393. {
  5394. result = llm.build_mpt();
  5395. } break;
  5396. case LLM_ARCH_STABLELM:
  5397. {
  5398. result = llm.build_stablelm();
  5399. } break;
  5400. case LLM_ARCH_QWEN:
  5401. {
  5402. result = llm.build_qwen();
  5403. } break;
  5404. case LLM_ARCH_PHI2:
  5405. {
  5406. result = llm.build_phi2();
  5407. } break;
  5408. case LLM_ARCH_PLAMO:
  5409. {
  5410. result = llm.build_plamo();
  5411. } break;
  5412. case LLM_ARCH_GPT2:
  5413. {
  5414. result = llm.build_gpt2();
  5415. } break;
  5416. default:
  5417. GGML_ASSERT(false);
  5418. }
  5419. llm.free();
  5420. if (worst_case) {
  5421. int n_non_view_total = 0;
  5422. for (int i = 0; i < result->n_nodes; ++i) {
  5423. if (result->nodes[i]->view_src == nullptr) {
  5424. n_non_view_total++;
  5425. }
  5426. }
  5427. LLAMA_LOG_INFO("%s: non-view tensors processed: %d/%d\n", __func__, n_non_view, n_non_view_total);
  5428. if (n_non_view != n_non_view_total) {
  5429. LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__);
  5430. LLAMA_LOG_WARN("%s: not all non-view tensors have been processed with a callback\n", __func__);
  5431. LLAMA_LOG_WARN("%s: this can indicate an inefficiency in the graph implementation\n", __func__);
  5432. LLAMA_LOG_WARN("%s: build with LLAMA_OFFLOAD_DEBUG for more info\n", __func__);
  5433. LLAMA_LOG_WARN("%s: ref: https://github.com/ggerganov/llama.cpp/pull/3837\n", __func__);
  5434. LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__);
  5435. }
  5436. }
  5437. return result;
  5438. }
  5439. // decode a batch of tokens by evaluating the transformer
  5440. //
  5441. // - lctx: llama context
  5442. // - batch: batch to evaluate
  5443. //
  5444. // return 0 on success
  5445. // return positive int on warning
  5446. // return negative int on error
  5447. //
  5448. static int llama_decode_internal(
  5449. llama_context & lctx,
  5450. llama_batch batch) {
  5451. const uint32_t n_tokens = batch.n_tokens;
  5452. if (n_tokens == 0) {
5453. LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
  5454. return -1;
  5455. }
  5456. const auto & model = lctx.model;
  5457. const auto & hparams = model.hparams;
  5458. const auto & cparams = lctx.cparams;
  5459. const auto n_batch = cparams.n_batch;
  5460. GGML_ASSERT(n_tokens <= n_batch);
  5461. int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
  5462. GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
  5463. const int64_t t_start_us = ggml_time_us();
  5464. #ifdef GGML_USE_MPI
  5465. // TODO: needs fix after #3228
  5466. GGML_ASSERT(false && "not implemented");
  5467. //ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
  5468. #endif
  5469. GGML_ASSERT(n_threads > 0);
  5470. auto & kv_self = lctx.kv_self;
  5471. GGML_ASSERT(!!kv_self.ctx);
  5472. const int64_t n_embd = hparams.n_embd;
  5473. const int64_t n_vocab = hparams.n_vocab;
  5474. // helpers for smoother batch API transition
  5475. // after deprecating the llama_eval calls, these will be removed
  5476. std::vector<llama_pos> pos;
  5477. std::vector<int32_t> n_seq_id;
  5478. std::vector<llama_seq_id *> seq_id_arr;
  5479. std::vector<std::vector<llama_seq_id>> seq_id;
  5480. if (batch.pos == nullptr) {
  5481. pos.resize(n_tokens);
  5482. for (uint32_t i = 0; i < n_tokens; i++) {
  5483. pos[i] = batch.all_pos_0 + i*batch.all_pos_1;
  5484. }
  5485. batch.pos = pos.data();
  5486. }
  5487. if (batch.seq_id == nullptr) {
  5488. n_seq_id.resize(n_tokens);
  5489. seq_id.resize(n_tokens);
  5490. seq_id_arr.resize(n_tokens);
  5491. for (uint32_t i = 0; i < n_tokens; i++) {
  5492. n_seq_id[i] = 1;
  5493. seq_id[i].resize(1);
  5494. seq_id[i][0] = batch.all_seq_id;
  5495. seq_id_arr[i] = seq_id[i].data();
  5496. }
  5497. batch.n_seq_id = n_seq_id.data();
  5498. batch.seq_id = seq_id_arr.data();
  5499. }
  5500. // if we have enough unused cells before the current head ->
  5501. // better to start searching from the beginning of the cache, hoping to fill it
  5502. if (kv_self.head > kv_self.used + 2*n_tokens) {
  5503. kv_self.head = 0;
  5504. }
  5505. if (!llama_kv_cache_find_slot(kv_self, batch)) {
  5506. return 1;
  5507. }
5508. // a heuristic to avoid attending the full cache if it is not yet fully utilized
5509. // after enough generations, the benefit of this heuristic disappears
5510. // if we start defragmenting the cache, this heuristic will become more important
  5511. kv_self.n = std::min((int32_t) cparams.n_ctx, std::max(32, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)));
  5512. //kv_self.n = llama_kv_cache_cell_max(kv_self);
  5513. //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);
  5514. ggml_allocr_reset(lctx.alloc);
  5515. ggml_cgraph * gf = llama_build_graph(lctx, batch);
  5516. ggml_allocr_alloc_graph(lctx.alloc, gf);
  5517. // the output is always the last tensor in the graph
  5518. struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
  5519. GGML_ASSERT(strcmp(res->name, "result_output") == 0);
  5520. // the embeddings could be the second to last tensor, or the third to last tensor
  5521. struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
  5522. if (strcmp(embeddings->name, "result_norm") != 0) {
  5523. embeddings = gf->nodes[gf->n_nodes - 3];
  5524. GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
  5525. }
  5526. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  5527. char * buf_alloc_base = (char *)ggml_backend_buffer_get_base(lctx.buf_alloc);
  5528. for (int i = 0; i < gf->n_leafs; i++) {
  5529. ggml_tensor * node = gf->leafs[i];
  5530. if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
  5531. ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base);
  5532. ggml_cuda_copy_to_device(node);
  5533. }
  5534. }
  5535. for (int i = 0; i < gf->n_nodes; i++) {
  5536. ggml_tensor * node = gf->nodes[i];
  5537. if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
  5538. ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base);
  5539. }
  5540. }
  5541. // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
  5542. if (!lctx.embedding.empty()) {
  5543. embeddings->backend = GGML_BACKEND_CPU;
  5544. }
  5545. res->backend = GGML_BACKEND_CPU;
  5546. #endif
  5547. // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
  5548. // for big prompts, if BLAS is enabled, it is better to use only one thread
  5549. // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
  5550. // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
5551. // we still need some threads to process all non-mul_mat ops, but not too many, to avoid interfering
5552. // with the BLAS calls. Need a better solution
  5553. if (n_tokens >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
  5554. n_threads = std::min(4, n_threads);
  5555. }
  5556. const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 1;
  5557. if (ggml_cpu_has_cublas() && fully_offloaded) {
  5558. n_threads = 1;
  5559. }
  5560. #ifdef GGML_USE_MPI
  5561. const int64_t n_layer = hparams.n_layer;
  5562. ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
  5563. #endif
  5564. #ifdef GGML_USE_METAL
  5565. if (ggml_backend_is_metal(lctx.backend)) {
  5566. ggml_backend_metal_set_n_cb(lctx.backend, n_threads);
  5567. }
  5568. #endif
  5569. if (ggml_backend_is_cpu(lctx.backend)) {
  5570. ggml_backend_cpu_set_n_threads(lctx.backend, n_threads);
  5571. }
  5572. ggml_backend_graph_compute(lctx.backend, gf);
  5573. #ifdef GGML_USE_MPI
  5574. ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
  5575. #endif
  5576. // update the kv ring buffer
  5577. {
  5578. if (kv_self.has_shift) {
  5579. kv_self.has_shift = false;
  5580. for (uint32_t i = 0; i < kv_self.size; ++i) {
  5581. kv_self.cells[i].delta = 0;
  5582. }
  5583. }
  5584. kv_self.head += n_tokens;
  5585. // Ensure kv cache head points to a valid index.
  5586. if (kv_self.head >= kv_self.size) {
  5587. kv_self.head = 0;
  5588. }
  5589. }
  5590. #ifdef GGML_PERF
  5591. // print timing information per ggml operation (for debugging purposes)
  5592. // requires GGML_PERF to be defined
  5593. ggml_graph_print(gf);
  5594. #endif
  5595. // plot the computation graph in dot format (for debugging purposes)
  5596. //if (n_past%100 == 0) {
  5597. // ggml_graph_dump_dot(gf, NULL, "llama.dot");
  5598. //}
  5599. // extract logits
  5600. // TODO: do not compute and extract logits if only embeddings are needed
  5601. // need to update the graphs to skip "result_output"
  5602. {
  5603. auto & logits_out = lctx.logits;
  5604. #ifndef NDEBUG
  5605. auto & logits_valid = lctx.logits_valid;
  5606. logits_valid.clear();
  5607. logits_valid.resize(n_tokens);
  5608. logits_out.clear();
  5609. #endif
  5610. if (batch.logits) {
  5611. logits_out.resize(n_vocab * n_tokens);
  5612. for (uint32_t i = 0; i < n_tokens; i++) {
  5613. if (batch.logits[i] == 0) {
  5614. continue;
  5615. }
  5616. ggml_backend_tensor_get(res, logits_out.data() + (n_vocab*i), (n_vocab*i)*sizeof(float), n_vocab*sizeof(float));
  5617. #ifndef NDEBUG
  5618. logits_valid[i] = true;
  5619. #endif
  5620. }
  5621. } else if (lctx.logits_all) {
  5622. logits_out.resize(n_vocab * n_tokens);
  5623. ggml_backend_tensor_get(res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float));
  5624. #ifndef NDEBUG
  5625. std::fill(logits_valid.begin(), logits_valid.end(), true);
  5626. #endif
  5627. } else {
  5628. logits_out.resize(n_vocab);
  5629. ggml_backend_tensor_get(res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float));
  5630. #ifndef NDEBUG
  5631. logits_valid[0] = true;
  5632. #endif
  5633. }
  5634. }
  5635. // extract embeddings
  5636. if (!lctx.embedding.empty()) {
  5637. auto & embedding_out = lctx.embedding;
  5638. embedding_out.resize(n_embd);
  5639. ggml_backend_tensor_get(embeddings, embedding_out.data(), (n_embd*(n_tokens - 1))*sizeof(float), n_embd*sizeof(float));
  5640. }
  5641. // measure the performance only for the single-token evals
  5642. if (n_tokens == 1) {
  5643. lctx.t_eval_us += ggml_time_us() - t_start_us;
  5644. lctx.n_eval++;
  5645. }
  5646. else if (n_tokens > 1) {
  5647. lctx.t_p_eval_us += ggml_time_us() - t_start_us;
  5648. lctx.n_p_eval += n_tokens;
  5649. }
  5650. // get a more accurate load time, upon first eval
  5651. // TODO: fix this
  5652. if (!lctx.has_evaluated_once) {
  5653. lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
  5654. lctx.has_evaluated_once = true;
  5655. }
  5656. return 0;
  5657. }
  5658. //
  5659. // tokenizer
  5660. //
  5661. static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
  5662. return vocab.type;
  5663. }
  5664. static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
  5665. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL;
  5666. }
  5667. static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
  5668. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNKNOWN;
  5669. }
  5670. static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
  5671. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
  5672. }
  5673. static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
  5674. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
  5675. }
  5676. static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id) {
  5677. return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
  5678. }
  5679. static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
  5680. GGML_ASSERT(llama_is_byte_token(vocab, id));
  5681. const auto& token_data = vocab.id_to_token.at(id);
  5682. switch (llama_vocab_get_type(vocab)) {
  5683. case LLAMA_VOCAB_TYPE_SPM: {
  5684. auto buf = token_data.text.substr(3, 2);
  5685. return strtol(buf.c_str(), NULL, 16);
  5686. }
  5687. case LLAMA_VOCAB_TYPE_BPE: {
  5688. GGML_ASSERT(false);
  5689. return unicode_to_bytes_bpe(token_data.text);
  5690. }
  5691. default:
  5692. GGML_ASSERT(false);
  5693. }
  5694. }
  5695. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
  5696. static const char * hex = "0123456789ABCDEF";
  5697. switch (llama_vocab_get_type(vocab)) {
  5698. case LLAMA_VOCAB_TYPE_SPM: {
  5699. const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
  5700. return vocab.token_to_id.at(buf);
  5701. }
  5702. case LLAMA_VOCAB_TYPE_BPE: {
  5703. return vocab.token_to_id.at(bytes_to_unicode_bpe(ch));
  5704. }
  5705. default:
  5706. GGML_ASSERT(false);
  5707. }
  5708. }
  5709. static void llama_escape_whitespace(std::string & text) {
  5710. replace_all(text, " ", "\xe2\x96\x81");
  5711. }
  5712. static void llama_unescape_whitespace(std::string & word) {
  5713. replace_all(word, "\xe2\x96\x81", " ");
  5714. }
  5715. struct llm_symbol {
  5716. using index = int;
  5717. index prev;
  5718. index next;
  5719. const char * text;
  5720. size_t n;
  5721. };
  5722. static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
  5723. // SPM tokenizer
  5724. // original implementation:
  5725. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  5726. struct llm_bigram_spm {
  5727. struct comparator {
  5728. bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
  5729. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  5730. }
  5731. };
  5732. using queue_storage = std::vector<llm_bigram_spm>;
  5733. using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
  5734. llm_symbol::index left;
  5735. llm_symbol::index right;
  5736. float score;
  5737. size_t size;
  5738. };
  5739. struct llm_tokenizer_spm {
  5740. llm_tokenizer_spm(const llama_vocab & vocab): vocab(vocab) {}
  5741. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  5742. // split string into utf8 chars
  5743. int index = 0;
  5744. size_t offs = 0;
  5745. while (offs < text.size()) {
  5746. llm_symbol sym;
  5747. size_t len = utf8_len(text[offs]);
  5748. sym.text = text.c_str() + offs;
  5749. sym.n = std::min(len, text.size() - offs);
  5750. offs += sym.n;
  5751. sym.prev = index - 1;
  5752. sym.next = offs == text.size() ? -1 : index + 1;
  5753. index++;
  5754. symbols.emplace_back(sym);
  5755. }
  5756. // seed the work queue with all possible 2-character tokens.
  5757. for (size_t i = 1; i < symbols.size(); ++i) {
  5758. try_add_bigram(i - 1, i);
  5759. }
5760. // keep substituting the highest-scoring pairs for as long as we can.
  5761. while (!work_queue.empty()) {
  5762. auto bigram = work_queue.top();
  5763. work_queue.pop();
  5764. auto & left_sym = symbols[bigram.left];
  5765. auto & right_sym = symbols[bigram.right];
  5766. // if one of the symbols already got merged, skip it.
  5767. if (left_sym.n == 0 || right_sym.n == 0 ||
  5768. left_sym.n + right_sym.n != bigram.size) {
  5769. continue;
  5770. }
  5771. // merge the right sym into the left one
  5772. left_sym.n += right_sym.n;
  5773. right_sym.n = 0;
  5774. //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  5775. // remove the right sym from the chain
  5776. left_sym.next = right_sym.next;
  5777. if (right_sym.next >= 0) {
  5778. symbols[right_sym.next].prev = bigram.left;
  5779. }
  5780. // find more substitutions
  5781. try_add_bigram(left_sym.prev, bigram.left);
  5782. try_add_bigram(bigram.left, left_sym.next);
  5783. }
  5784. for (int i = 0; i != -1; i = symbols[i].next) {
  5785. auto & symbol = symbols[i];
  5786. resegment(symbol, output);
  5787. }
  5788. }
  5789. private:
  5790. void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
  5791. auto text = std::string(symbol.text, symbol.n);
  5792. auto token = vocab.token_to_id.find(text);
  5793. // Do we need to support is_unused?
  5794. if (token != vocab.token_to_id.end()) {
  5795. output.push_back((*token).second);
  5796. return;
  5797. }
  5798. const auto p = rev_merge.find(text);
  5799. if (p == rev_merge.end()) {
  5800. // output any symbols that did not form tokens as bytes.
  5801. for (int j = 0; j < (int)symbol.n; ++j) {
  5802. llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
  5803. output.push_back(token_id);
  5804. }
  5805. return;
  5806. }
  5807. resegment(symbols[p->second.first], output);
  5808. resegment(symbols[p->second.second], output);
  5809. }
  5810. void try_add_bigram(int left, int right) {
  5811. if (left == -1 || right == -1) {
  5812. return;
  5813. }
  5814. const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
  5815. auto token = vocab.token_to_id.find(text);
  5816. if (token == vocab.token_to_id.end()) {
  5817. return;
  5818. }
  5819. if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
  5820. return;
  5821. }
  5822. const auto & tok_data = vocab.id_to_token[(*token).second];
  5823. llm_bigram_spm bigram;
  5824. bigram.left = left;
  5825. bigram.right = right;
  5826. bigram.score = tok_data.score;
  5827. bigram.size = text.size();
  5828. work_queue.push(bigram);
  5829. // Do we need to support is_unused?
  5830. rev_merge[text] = std::make_pair(left, right);
  5831. }
  5832. const llama_vocab & vocab;
  5833. std::vector<llm_symbol> symbols;
  5834. llm_bigram_spm::queue work_queue;
  5835. std::map<std::string, std::pair<int, int>> rev_merge;
  5836. };
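// Illustrative usage sketch (assumes a loaded llama_vocab of type SPM; the real call site is
// llama_tokenize_internal below):
//
//   std::vector<llama_vocab::id> ids;
//   std::string text = " Hello world";        // note the SPM space prefix
//   llama_escape_whitespace(text);            // " " -> U+2581
//   llm_tokenizer_spm(vocab).tokenize(text, ids);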
  5837. // BPE tokenizer
  5838. // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
  5839. // tried to simplify unicode stuff, so most likely does not work 100% correctly!
  5840. // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
  5841. struct llm_bigram_bpe {
  5842. struct comparator {
  5843. bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
  5844. return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
  5845. }
  5846. };
  5847. using queue_storage = std::vector<llm_bigram_bpe>;
  5848. using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
  5849. llm_symbol::index left;
  5850. llm_symbol::index right;
  5851. std::string text;
  5852. int rank;
  5853. size_t size;
  5854. };
  5855. struct llm_tokenizer_bpe {
  5856. llm_tokenizer_bpe(const llama_vocab & vocab): vocab(vocab) {}
  5857. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  5858. int final_prev_index = -1;
  5859. auto word_collection = bpe_gpt2_preprocess(text);
  5860. symbols_final.clear();
  5861. for (auto & word : word_collection) {
  5862. work_queue = llm_bigram_bpe::queue();
  5863. symbols.clear();
  5864. int index = 0;
  5865. size_t offset = 0;
  5866. while (offset < word.size()) {
  5867. llm_symbol sym;
  5868. size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
  5869. sym.text = word.c_str() + offset;
  5870. sym.n = char_len;
  5871. offset += sym.n;
  5872. sym.prev = index - 1;
  5873. sym.next = offset == word.size() ? -1 : index + 1;
  5874. index++;
  5875. symbols.emplace_back(sym);
  5876. }
  5877. for (size_t i = 1; i < symbols.size(); ++i) {
  5878. add_new_bigram(i - 1, i);
  5879. }
  5880. // build token(s)
  5881. while (!work_queue.empty()) {
  5882. auto bigram = work_queue.top();
  5883. work_queue.pop();
  5884. auto & left_symbol = symbols[bigram.left];
  5885. auto & right_symbol = symbols[bigram.right];
  5886. if (left_symbol.n == 0 || right_symbol.n == 0) {
  5887. continue;
  5888. }
  5889. std::string left_token = std::string(left_symbol.text, left_symbol.n);
  5890. std::string right_token = std::string(right_symbol.text, right_symbol.n);
  5891. if (left_token + right_token != bigram.text) {
  5892. continue; // Skip this bigram if it's outdated
  5893. }
  5894. // merge the right sym into the left one
  5895. left_symbol.n += right_symbol.n;
  5896. right_symbol.n = 0;
  5897. // remove the right sym from the chain
  5898. left_symbol.next = right_symbol.next;
  5899. if (right_symbol.next >= 0) {
  5900. symbols[right_symbol.next].prev = bigram.left;
  5901. }
  5902. add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
  5903. add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
  5904. }
5905. // add the finished tokens to the final list, keeping correct order for next and prev
  5906. for (auto & sym : symbols) {
  5907. if (sym.n > 0) {
  5908. sym.prev = final_prev_index;
  5909. sym.next = -1;
  5910. if (final_prev_index != -1) {
  5911. symbols_final[final_prev_index].next = symbols_final.size();
  5912. }
  5913. symbols_final.emplace_back(sym);
  5914. final_prev_index = symbols_final.size() - 1;
  5915. }
  5916. }
  5917. }
  5918. symbols = symbols_final;
  5919. if (!symbols.empty()) {
  5920. for (int i = 0; i != -1; i = symbols[i].next) {
  5921. auto & symbol = symbols[i];
  5922. if (symbol.n == 0) {
  5923. continue;
  5924. }
  5925. const std::string str = std::string(symbol.text, symbol.n);
  5926. const auto token = vocab.token_to_id.find(str);
  5927. if (token == vocab.token_to_id.end()) {
  5928. for (auto j = str.begin(); j != str.end(); ++j) {
  5929. std::string byte_str(1, *j);
  5930. auto token_multibyte = vocab.token_to_id.find(byte_str);
  5931. if (token_multibyte == vocab.token_to_id.end()) {
  5932. throw std::runtime_error("ERROR: byte not found in vocab");
  5933. }
  5934. output.push_back((*token_multibyte).second);
  5935. }
  5936. } else {
  5937. output.push_back((*token).second);
  5938. }
  5939. }
  5940. }
  5941. }
  5942. private:
  5943. void add_new_bigram(int left, int right) {
  5944. if (left == -1 || right == -1) {
  5945. return;
  5946. }
  5947. std::string left_token = std::string(symbols[left].text, symbols[left].n);
  5948. std::string right_token = std::string(symbols[right].text, symbols[right].n);
  5949. int rank_found = -1;
  5950. rank_found = vocab.find_bpe_rank(left_token, right_token);
  5951. if (rank_found < 0) {
  5952. return;
  5953. }
  5954. llm_bigram_bpe bigram;
  5955. bigram.left = left;
  5956. bigram.right = right;
  5957. bigram.text = left_token + right_token;
  5958. bigram.size = left_token.size() + right_token.size();
  5959. bigram.rank = rank_found;
  5960. work_queue.push(bigram);
  5961. }
  5962. std::vector<std::string> bpe_gpt2_preprocess(const std::string & text) {
  5963. std::vector<std::string> bpe_words;
  5964. std::vector<std::string> bpe_encoded_words;
  5965. std::string token = "";
  5966. // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+
  5967. bool collecting_numeric = false;
  5968. bool collecting_letter = false;
  5969. bool collecting_special = false;
  5970. bool collecting_whitespace_lookahead = false;
  5971. bool collecting = false;
  5972. std::vector<std::string> text_utf;
  5973. text_utf.reserve(text.size());
  5974. bpe_words.reserve(text.size());
  5975. bpe_encoded_words.reserve(text.size());
  5976. auto cps = codepoints_from_utf8(text);
  5977. for (size_t i = 0; i < cps.size(); ++i)
  5978. text_utf.emplace_back(codepoint_to_utf8(cps[i]));
  5979. for (int i = 0; i < (int)text_utf.size(); i++) {
  5980. const std::string & utf_char = text_utf[i];
  5981. bool split_condition = false;
  5982. int bytes_remain = text_utf.size() - i;
  5983. // forward backward lookups
  5984. const std::string & utf_char_next = (i + 1 < (int)text_utf.size()) ? text_utf[i + 1] : "";
  5985. const std::string & utf_char_next_next = (i + 2 < (int)text_utf.size()) ? text_utf[i + 2] : "";
  5986. // handling contractions
  5987. if (!split_condition && bytes_remain >= 2) {
  5988. // 's|'t|'m|'d
  5989. if (utf_char == "\'" && (utf_char_next == "s" || utf_char_next == "t" || utf_char_next == "m" || utf_char_next == "d")) {
  5990. split_condition = true;
  5991. }
  5992. if (split_condition) {
  5993. if (token.size()) {
  5994. bpe_words.emplace_back(token); // push previous content as token
  5995. }
  5996. token = utf_char + utf_char_next;
  5997. bpe_words.emplace_back(token);
  5998. token = "";
  5999. i++;
  6000. continue;
  6001. }
  6002. }
  6003. if (!split_condition && bytes_remain >= 3) {
  6004. // 're|'ve|'ll
  6005. if (utf_char == "\'" && (
  6006. (utf_char_next == "r" && utf_char_next_next == "e") ||
  6007. (utf_char_next == "v" && utf_char_next_next == "e") ||
  6008. (utf_char_next == "l" && utf_char_next_next == "l"))
  6009. ) {
  6010. split_condition = true;
  6011. }
  6012. if (split_condition) {
  6013. // current token + next token can be defined
  6014. if (token.size()) {
  6015. bpe_words.emplace_back(token); // push previous content as token
  6016. }
  6017. token = utf_char + utf_char_next + utf_char_next_next;
  6018. bpe_words.emplace_back(token); // the contraction
  6019. token = "";
  6020. i += 2;
  6021. continue;
  6022. }
  6023. }
  6024. if (!split_condition && !collecting) {
  6025. if (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER)) {
  6026. collecting_letter = true;
  6027. collecting = true;
  6028. }
  6029. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  6030. collecting_numeric = true;
  6031. collecting = true;
  6032. }
  6033. else if (
  6034. ((codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) && (codepoint_type(utf_char) != CODEPOINT_TYPE_WHITESPACE)) ||
  6035. (!token.size() && utf_char == " " && codepoint_type(utf_char_next) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char_next) != CODEPOINT_TYPE_DIGIT && codepoint_type(utf_char_next) != CODEPOINT_TYPE_WHITESPACE)
  6036. ) {
  6037. collecting_special = true;
  6038. collecting = true;
  6039. }
  6040. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE && codepoint_type(utf_char_next) == CODEPOINT_TYPE_WHITESPACE) {
  6041. collecting_whitespace_lookahead = true;
  6042. collecting = true;
  6043. }
  6044. else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE) {
  6045. split_condition = true;
  6046. }
  6047. }
  6048. else if (!split_condition && collecting) {
  6049. if (collecting_letter && codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER) {
  6050. split_condition = true;
  6051. }
  6052. else if (collecting_numeric && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) {
  6053. split_condition = true;
  6054. }
  6055. else if (collecting_special && (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE)) {
  6056. split_condition = true;
  6057. }
  6058. else if (collecting_whitespace_lookahead && (codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
  6059. split_condition = true;
  6060. }
  6061. }
  6062. if (utf_char_next == "") {
  6063. split_condition = true; // final
  6064. token += utf_char;
  6065. }
  6066. if (split_condition) {
  6067. if (token.size()) {
  6068. bpe_words.emplace_back(token);
  6069. }
  6070. token = utf_char;
  6071. collecting = false;
  6072. collecting_letter = false;
  6073. collecting_numeric = false;
  6074. collecting_special = false;
  6075. collecting_whitespace_lookahead = false;
  6076. }
  6077. else {
  6078. token += utf_char;
  6079. }
  6080. }
  6081. for (std::string & word : bpe_words) {
  6082. std::string encoded_token = "";
  6083. for (char & c : word) {
  6084. encoded_token += bytes_to_unicode_bpe(c);
  6085. }
  6086. bpe_encoded_words.emplace_back(encoded_token);
  6087. }
  6088. return bpe_encoded_words;
  6089. }
  6090. const llama_vocab & vocab;
  6091. std::vector<llm_symbol> symbols;
  6092. std::vector<llm_symbol> symbols_final;
  6093. llm_bigram_bpe::queue work_queue;
  6094. };
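// Merge-order note (illustrative): unlike the SPM tokenizer above, which merges by score, the BPE
// work queue pops the pair with the *lowest* merge rank first (see the comparator). E.g. for the
// word "lower", the candidate pairs (l,o), (o,w), (w,e), (e,r) are queued and the pair with the
// smallest rank in the vocab's merge table is applied first. The real call site is
// llama_tokenize_internal below.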
  6095. typedef enum FRAGMENT_BUFFER_VARIANT_TYPE{
  6096. FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
  6097. FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
  6098. } FRAGMENT_BUFFER_VARIANT_TYPE;
  6099. struct fragment_buffer_variant{
  6100. fragment_buffer_variant(llama_vocab::id _token)
  6101. :
  6102. type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
  6103. token(_token),
  6104. raw_text(_dummy),
  6105. offset(0),
  6106. length(0){}
  6107. fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
  6108. :
  6109. type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
  6110. token((llama_vocab::id)-1),
  6111. raw_text(_raw_text),
  6112. offset(_offset),
  6113. length(_length){
  6114. GGML_ASSERT( _offset >= 0 );
  6115. GGML_ASSERT( _length >= 1 );
  6116. GGML_ASSERT( offset + length <= raw_text.length() );
  6117. }
  6118. const FRAGMENT_BUFFER_VARIANT_TYPE type;
  6119. const llama_vocab::id token;
  6120. const std::string _dummy;
  6121. const std::string & raw_text;
  6122. const uint64_t offset;
  6123. const uint64_t length;
  6124. };
  6125. // #define PRETOKENIZERDEBUG
  6126. static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer)
  6127. {
  6128. // for each special token
  6129. for (const auto & st: vocab.special_tokens_cache) {
  6130. const auto & special_token = st.first;
  6131. const auto & special_id = st.second;
  6132. // for each text fragment
  6133. std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
  6134. while (it != buffer.end()) {
  6135. auto & fragment = (*it);
  6136. // if a fragment is text ( not yet processed )
  6137. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  6138. auto * raw_text = &(fragment.raw_text);
  6139. auto raw_text_base_offset = fragment.offset;
  6140. auto raw_text_base_length = fragment.length;
  6141. // loop over the text
  6142. while (true) {
  6143. // find the first occurrence of a given special token in this fragment
6144. // passing the offset argument only limits the "search area", but match coordinates
6145. // are still relative to the full source raw_text
  6146. auto match = raw_text->find(special_token, raw_text_base_offset);
  6147. // no occurrences found, stop processing this fragment for a given special token
  6148. if (match == std::string::npos) break;
  6149. // check if match is within bounds of offset <-> length
  6150. if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
  6151. #ifdef PRETOKENIZERDEBUG
  6152. fprintf(stderr, "FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  6153. #endif
  6154. auto source = std::distance(buffer.begin(), it);
  6155. // if match is further than base offset
  6156. // then we have some text to the left of it
  6157. if (match > raw_text_base_offset) {
  6158. // left
  6159. const int64_t left_reminder_offset = raw_text_base_offset + 0;
  6160. const int64_t left_reminder_length = match - raw_text_base_offset;
  6161. buffer.emplace_after(it, (*raw_text), left_reminder_offset, left_reminder_length);
  6162. #ifdef PRETOKENIZERDEBUG
  6163. fprintf(stderr, "FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
  6164. #endif
  6165. it++;
  6166. }
  6167. // special token
  6168. buffer.emplace_after(it, special_id);
  6169. it++;
  6170. // right
  6171. if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
  6172. const int64_t right_reminder_offset = match + special_token.length();
  6173. const int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
  6174. buffer.emplace_after(it, (*raw_text), right_reminder_offset, right_reminder_length);
  6175. #ifdef PRETOKENIZERDEBUG
  6176. fprintf(stderr, "FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
  6177. #endif
  6178. it++;
  6179. if (source == 0) {
  6180. buffer.erase_after(buffer.before_begin());
  6181. } else {
  6182. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  6183. }
  6184. // repeat for the right side
  6185. raw_text_base_offset = right_reminder_offset;
  6186. raw_text_base_length = right_reminder_length;
  6187. #ifdef PRETOKENIZERDEBUG
  6188. fprintf(stderr, "RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  6189. #endif
  6190. } else {
  6191. if (source == 0) {
  6192. buffer.erase_after(buffer.before_begin());
  6193. } else {
  6194. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  6195. }
  6196. break;
  6197. }
  6198. }
  6199. }
  6200. it++;
  6201. }
  6202. }
  6203. }
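// Worked example (hypothetical special token "<bot>" with id 42): partitioning the fragment
// "foo<bot>bar" produces three fragments, in order:
//   RAW_TEXT("foo"), TOKEN(42), RAW_TEXT("bar")
// Raw-text fragments are later tokenized normally, while TOKEN fragments are emitted as-is.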
  6204. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special) {
  6205. std::vector<llama_vocab::id> output;
  6206. // OG tokenizer behavior:
  6207. //
  6208. // tokenizer.encode('', add_bos=True) returns [1]
  6209. // tokenizer.encode('', add_bos=False) returns []
  6210. if (bos && vocab.special_bos_id != -1) {
  6211. output.push_back(vocab.special_bos_id);
  6212. }
  6213. if (raw_text.empty()) {
  6214. return output;
  6215. }
  6216. std::forward_list<fragment_buffer_variant> fragment_buffer;
  6217. fragment_buffer.emplace_front( raw_text, 0, raw_text.length() );
  6218. if (special) tokenizer_st_partition( vocab, fragment_buffer );
  6219. switch (vocab.type) {
  6220. case LLAMA_VOCAB_TYPE_SPM:
  6221. {
  6222. for (const auto & fragment: fragment_buffer)
  6223. {
  6224. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT)
  6225. {
  6226. // without adding this leading whitespace, we do not get the same results as the original tokenizer
  6227. // TODO: It's likely possible to get rid of this string copy entirely
  6228. // by modifying llm_tokenizer_x to operate with string offsets like pre-tokenizer
  6229. // and passing 'add space prefix' as bool argument
  6230. //
  6231. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  6232. if (&fragment == &fragment_buffer.front()) {
  6233. raw_text = " " + raw_text; // prefix with space if the first token is not special
  6234. }
  6235. #ifdef PRETOKENIZERDEBUG
  6236. fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  6237. #endif
  6238. llm_tokenizer_spm tokenizer(vocab);
  6239. llama_escape_whitespace(raw_text);
  6240. tokenizer.tokenize(raw_text, output);
  6241. }
  6242. else // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  6243. {
  6244. output.push_back(fragment.token);
  6245. }
  6246. }
  6247. } break;
  6248. case LLAMA_VOCAB_TYPE_BPE:
  6249. {
  6250. for (const auto & fragment: fragment_buffer)
  6251. {
  6252. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT)
  6253. {
  6254. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  6255. #ifdef PRETOKENIZERDEBUG
  6256. fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  6257. #endif
  6258. llm_tokenizer_bpe tokenizer(vocab);
  6259. tokenizer.tokenize(raw_text, output);
  6260. }
  6261. else // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  6262. {
  6263. output.push_back(fragment.token);
  6264. }
  6265. }
  6266. } break;
  6267. }
  6268. return output;
  6269. }
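// Illustrative public-API usage (assumes the llama_tokenize() wrapper of this version, which
// forwards here; `model` and `text` are placeholders):
//
//   std::vector<llama_token> ids(text_len + 1);
//   int n = llama_tokenize(model, text, text_len, ids.data(), (int32_t) ids.size(),
//                          /*add_bos=*/true, /*special=*/false);
//   if (n < 0) { ids.resize(-n); /* buffer too small, retry */ } else { ids.resize(n); }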
  6270. //
  6271. // grammar - internal
  6272. //
  6273. struct llama_partial_utf8 {
  6274. uint32_t value; // bit value so far (unshifted)
  6275. int n_remain; // num bytes remaining; -1 indicates invalid sequence
  6276. };
  6277. struct llama_grammar {
  6278. const std::vector<std::vector<llama_grammar_element>> rules;
  6279. std::vector<std::vector<const llama_grammar_element *>> stacks;
  6280. // buffer for partially generated UTF-8 sequence from accepted tokens
  6281. llama_partial_utf8 partial_utf8;
  6282. };
  6283. struct llama_grammar_candidate {
  6284. size_t index;
  6285. const uint32_t * code_points;
  6286. llama_partial_utf8 partial_utf8;
  6287. };
6288. // Decodes a UTF-8 string which may end in an incomplete sequence. Appends a terminating 0 so the
6289. // result can be used through a raw pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
  6290. static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
  6291. const std::string & src,
  6292. llama_partial_utf8 partial_start) {
  6293. static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
  6294. const char * pos = src.c_str();
  6295. std::vector<uint32_t> code_points;
6296. // common English strings have the same number of codepoints and bytes. `+ 1` for the terminating 0.
  6297. code_points.reserve(src.size() + 1);
  6298. uint32_t value = partial_start.value;
  6299. int n_remain = partial_start.n_remain;
  6300. // continue previous decode, if applicable
  6301. while (*pos != 0 && n_remain > 0) {
  6302. uint8_t next_byte = static_cast<uint8_t>(*pos);
  6303. if ((next_byte >> 6) != 2) {
  6304. // invalid sequence, abort
  6305. code_points.push_back(0);
  6306. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
  6307. }
  6308. value = (value << 6) + (next_byte & 0x3F);
  6309. ++pos;
  6310. --n_remain;
  6311. }
  6312. if (partial_start.n_remain > 0 && n_remain == 0) {
  6313. code_points.push_back(value);
  6314. }
  6315. // decode any subsequent utf-8 sequences, which may end in an incomplete one
  6316. while (*pos != 0) {
  6317. uint8_t first_byte = static_cast<uint8_t>(*pos);
  6318. uint8_t highbits = first_byte >> 4;
  6319. n_remain = lookup[highbits] - 1;
  6320. if (n_remain < 0) {
  6321. // invalid sequence, abort
  6322. code_points.clear();
  6323. code_points.push_back(0);
  6324. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
  6325. }
  6326. uint8_t mask = (1 << (7 - n_remain)) - 1;
  6327. value = first_byte & mask;
  6328. ++pos;
  6329. while (*pos != 0 && n_remain > 0) {
  6330. value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
  6331. ++pos;
  6332. --n_remain;
  6333. }
  6334. if (n_remain == 0) {
  6335. code_points.push_back(value);
  6336. }
  6337. }
  6338. code_points.push_back(0);
  6339. return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
  6340. }
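// Worked example (illustrative): decoding the 2-byte sequence 0xC3 0xA9 ("é") yields the codepoint
// 0xE9. If only the first byte 0xC3 has arrived, the function returns
// partial_utf8 = { value = 0x03, n_remain = 1 }, which the next call resumes from.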
  6341. // returns true iff pos points to the end of one of the definitions of a rule
  6342. static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
  6343. switch (pos->type) {
  6344. case LLAMA_GRETYPE_END: return true; // NOLINT
  6345. case LLAMA_GRETYPE_ALT: return true; // NOLINT
  6346. default: return false;
  6347. }
  6348. }
  6349. // returns true iff chr satisfies the char range at pos (regular or inverse range)
  6350. // asserts that pos is pointing to a char range element
  6351. static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
  6352. const llama_grammar_element * pos,
  6353. const uint32_t chr) {
  6354. bool found = false;
  6355. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  6356. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT
  6357. do {
  6358. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  6359. // inclusive range, e.g. [a-z]
  6360. found = found || (pos->value <= chr && chr <= pos[1].value);
  6361. pos += 2;
  6362. } else {
  6363. // exact char match, e.g. [a] or "a"
  6364. found = found || pos->value == chr;
  6365. pos += 1;
  6366. }
  6367. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  6368. return std::make_pair(found == is_positive_char, pos);
  6369. }
  6370. // returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
  6371. // range at pos (regular or inverse range)
  6372. // asserts that pos is pointing to a char range element
  6373. static bool llama_grammar_match_partial_char(
  6374. const llama_grammar_element * pos,
  6375. const llama_partial_utf8 partial_utf8) {
  6376. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
  6377. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  6378. uint32_t partial_value = partial_utf8.value;
  6379. int n_remain = partial_utf8.n_remain;
  6380. // invalid sequence or 7-bit char split across 2 bytes (overlong)
  6381. if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
  6382. return false;
  6383. }
  6384. // range of possible code points this partial UTF-8 sequence could complete to
  6385. uint32_t low = partial_value << (n_remain * 6);
  6386. uint32_t high = low | ((1 << (n_remain * 6)) - 1);
  6387. if (low == 0) {
  6388. if (n_remain == 2) {
  6389. low = 1 << 11;
  6390. } else if (n_remain == 3) {
  6391. low = 1 << 16;
  6392. }
  6393. }
  6394. do {
  6395. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  6396. // inclusive range, e.g. [a-z]
  6397. if (pos->value <= high && low <= pos[1].value) {
  6398. return is_positive_char;
  6399. }
  6400. pos += 2;
  6401. } else {
  6402. // exact char match, e.g. [a] or "a"
  6403. if (low <= pos->value && pos->value <= high) {
  6404. return is_positive_char;
  6405. }
  6406. pos += 1;
  6407. }
  6408. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  6409. return !is_positive_char;
  6410. }
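// Worked example for the range above (illustrative): a pending first byte 0xC3 arrives as
// partial_utf8 = { value = 0x03, n_remain = 1 }, so low = 0x03 << 6 = 0xC0 and
// high = 0xC0 | 0x3F = 0xFF; the partial sequence can only complete to U+00C0..U+00FF.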
  6411. // transforms a grammar pushdown stack into N possible stacks, all ending
  6412. // at a character range (terminal element)
  6413. static void llama_grammar_advance_stack(
  6414. const std::vector<std::vector<llama_grammar_element>> & rules,
  6415. const std::vector<const llama_grammar_element *> & stack,
  6416. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  6417. if (stack.empty()) {
  6418. new_stacks.emplace_back(stack);
  6419. return;
  6420. }
  6421. const llama_grammar_element * pos = stack.back();
  6422. switch (pos->type) {
  6423. case LLAMA_GRETYPE_RULE_REF: {
  6424. const size_t rule_id = static_cast<size_t>(pos->value);
  6425. const llama_grammar_element * subpos = rules[rule_id].data();
  6426. do {
  6427. // init new stack without the top (pos)
  6428. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  6429. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  6430. // if this rule ref is followed by another element, add that to stack
  6431. new_stack.push_back(pos + 1);
  6432. }
  6433. if (!llama_grammar_is_end_of_sequence(subpos)) {
  6434. // if alternate is nonempty, add to stack
  6435. new_stack.push_back(subpos);
  6436. }
  6437. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  6438. while (!llama_grammar_is_end_of_sequence(subpos)) {
  6439. // scan to end of alternate def
  6440. subpos++;
  6441. }
  6442. if (subpos->type == LLAMA_GRETYPE_ALT) {
  6443. // there's another alternate def of this rule to process
  6444. subpos++;
  6445. } else {
  6446. break;
  6447. }
  6448. } while (true);
  6449. break;
  6450. }
  6451. case LLAMA_GRETYPE_CHAR:
  6452. case LLAMA_GRETYPE_CHAR_NOT:
  6453. new_stacks.emplace_back(stack);
  6454. break;
  6455. default:
  6456. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  6457. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  6458. // those
  6459. GGML_ASSERT(false);
  6460. }
  6461. }
  6462. // takes a set of possible pushdown stacks on a grammar, which are required to
  6463. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  6464. // produces the N possible stacks if the given char is accepted at those
  6465. // positions
  6466. static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
  6467. const std::vector<std::vector<llama_grammar_element>> & rules,
  6468. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  6469. const uint32_t chr) {
  6470. std::vector<std::vector<const llama_grammar_element *>> new_stacks;
  6471. for (const auto & stack : stacks) {
  6472. if (stack.empty()) {
  6473. continue;
  6474. }
  6475. auto match = llama_grammar_match_char(stack.back(), chr);
  6476. if (match.first) {
  6477. const llama_grammar_element * pos = match.second;
  6478. // update top of stack to next element, if any
  6479. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  6480. if (!llama_grammar_is_end_of_sequence(pos)) {
  6481. new_stack.push_back(pos);
  6482. }
  6483. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  6484. }
  6485. }
  6486. return new_stacks;
  6487. }
  6488. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  6489. const std::vector<std::vector<llama_grammar_element>> & rules,
  6490. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  6491. const std::vector<llama_grammar_candidate> & candidates);
  6492. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  6493. const std::vector<std::vector<llama_grammar_element>> & rules,
  6494. const std::vector<const llama_grammar_element *> & stack,
  6495. const std::vector<llama_grammar_candidate> & candidates) {
  6496. std::vector<llama_grammar_candidate> rejects;
  6497. if (stack.empty()) {
  6498. for (const auto & tok : candidates) {
  6499. if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
  6500. rejects.push_back(tok);
  6501. }
  6502. }
  6503. return rejects;
  6504. }
  6505. const llama_grammar_element * stack_pos = stack.back();
  6506. std::vector<llama_grammar_candidate> next_candidates;
  6507. for (const auto & tok : candidates) {
  6508. if (*tok.code_points == 0) {
  6509. // reached end of full codepoints in token, reject iff it ended in a partial sequence
  6510. // that cannot satisfy this position in grammar
  6511. if (tok.partial_utf8.n_remain != 0 &&
  6512. !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
  6513. rejects.push_back(tok);
  6514. }
  6515. } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
  6516. next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
  6517. } else {
  6518. rejects.push_back(tok);
  6519. }
  6520. }
  6521. const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  6522. // update top of stack to next element, if any
  6523. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  6524. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  6525. stack_after.push_back(stack_pos_after);
  6526. }
  6527. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  6528. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  6529. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  6530. for (const auto & tok : next_rejects) {
  6531. rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
  6532. }
  6533. return rejects;
  6534. }
  6535. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  6536. const std::vector<std::vector<llama_grammar_element>> & rules,
  6537. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  6538. const std::vector<llama_grammar_candidate> & candidates) {
  6539. GGML_ASSERT(!stacks.empty()); // REVIEW
  6540. if (candidates.empty()) {
  6541. return std::vector<llama_grammar_candidate>();
  6542. }
  6543. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  6544. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  6545. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  6546. }
  6547. return rejects;
  6548. }
  6549. //
  6550. // grammar - external
  6551. //
  6552. struct llama_grammar * llama_grammar_init(
  6553. const llama_grammar_element ** rules,
  6554. size_t n_rules,
  6555. size_t start_rule_index) {
  6556. const llama_grammar_element * pos;
  6557. // copy rule definitions into vectors
  6558. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  6559. for (size_t i = 0; i < n_rules; i++) {
  6560. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  6561. vec_rules[i].push_back(*pos);
  6562. }
  6563. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  6564. }
  6565. // loop over alternates of start rule to build initial stacks
  6566. std::vector<std::vector<const llama_grammar_element *>> stacks;
  6567. pos = rules[start_rule_index];
  6568. do {
  6569. std::vector<const llama_grammar_element *> stack;
  6570. if (!llama_grammar_is_end_of_sequence(pos)) {
  6571. // if alternate is nonempty, add to stack
  6572. stack.push_back(pos);
  6573. }
  6574. llama_grammar_advance_stack(vec_rules, stack, stacks);
  6575. while (!llama_grammar_is_end_of_sequence(pos)) {
  6576. // scan to end of alternate def
  6577. pos++;
  6578. }
  6579. if (pos->type == LLAMA_GRETYPE_ALT) {
  6580. // there's another alternate def of this rule to process
  6581. pos++;
  6582. } else {
  6583. break;
  6584. }
  6585. } while (true);
  6586. return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
  6587. }
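// Illustrative usage sketch (not part of this file): in the examples the rules are typically
// produced from a GBNF string by the common/grammar-parser helper; the names below are assumed
// from that helper and from this version's sampling API:
//
//   grammar_parser::parse_state parsed = grammar_parser::parse(gbnf.c_str());
//   std::vector<const llama_grammar_element *> rules = parsed.c_rules();
//   llama_grammar * g = llama_grammar_init(rules.data(), rules.size(), parsed.symbol_ids.at("root"));
//   // per step: llama_sample_grammar(ctx, &candidates, g);
//   // after sampling a token id: llama_grammar_accept_token(ctx, g, id);
//   llama_grammar_free(g);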
  6588. void llama_grammar_free(struct llama_grammar * grammar) {
  6589. delete grammar;
  6590. }
  6591. struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
  6592. llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };
  6593. // redirect elements in stacks to point to new rules
  6594. for (size_t is = 0; is < result->stacks.size(); is++) {
  6595. for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
  6596. for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
  6597. for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
  6598. if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
  6599. result->stacks[is][ie] = &result->rules[ir0][ir1];
  6600. }
  6601. }
  6602. }
  6603. }
  6604. }
  6605. return result;
  6606. }
  6607. //
  6608. // sampling
  6609. //
  6610. void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
  6611. if (seed == LLAMA_DEFAULT_SEED) {
  6612. seed = time(NULL);
  6613. }
  6614. ctx->rng.seed(seed);
  6615. }
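// llama_sample_softmax (below) converts logits to probabilities with the numerically stable form
// p_i = exp(l_i - max_l) / sum_j exp(l_j - max_l), sorting candidates by logit first.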
  6616. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  6617. GGML_ASSERT(candidates->size > 0);
  6618. const int64_t t_start_sample_us = ggml_time_us();
  6619. // Sort the logits in descending order
  6620. if (!candidates->sorted) {
  6621. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  6622. return a.logit > b.logit;
  6623. });
  6624. candidates->sorted = true;
  6625. }
  6626. float max_l = candidates->data[0].logit;
  6627. float cum_sum = 0.0f;
  6628. for (size_t i = 0; i < candidates->size; ++i) {
  6629. float p = expf(candidates->data[i].logit - max_l);
  6630. candidates->data[i].p = p;
  6631. cum_sum += p;
  6632. }
  6633. for (size_t i = 0; i < candidates->size; ++i) {
  6634. candidates->data[i].p /= cum_sum;
  6635. }
  6636. if (ctx) {
  6637. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6638. }
  6639. }
  6640. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int32_t k, size_t min_keep) {
  6641. const int64_t t_start_sample_us = ggml_time_us();
  6642. k = std::max(k, (int) min_keep);
  6643. k = std::min(k, (int) candidates->size);
  6644. // Sort scores in descending order
  6645. if (!candidates->sorted) {
  6646. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  6647. return a.logit > b.logit;
  6648. };
  6649. if (k == (int) candidates->size) {
  6650. std::sort(candidates->data, candidates->data + candidates->size, comp);
  6651. } else {
  6652. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  6653. }
  6654. candidates->sorted = true;
  6655. }
  6656. candidates->size = k;
  6657. if (ctx) {
  6658. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6659. }
  6660. }
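// Top-p ("nucleus") sampling (below): keep the smallest prefix of probability-sorted tokens whose
// cumulative probability reaches p. Illustrative numbers: with probs {0.5, 0.3, 0.15, 0.05} and
// p = 0.9, the first three tokens are kept (0.5 + 0.3 + 0.15 >= 0.9).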
  6661. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  6662. if (p >= 1.0f) {
  6663. return;
  6664. }
  6665. llama_sample_softmax(ctx, candidates);
  6666. const int64_t t_start_sample_us = ggml_time_us();
  6667. // Compute the cumulative probabilities
  6668. float cum_sum = 0.0f;
  6669. size_t last_idx = candidates->size;
  6670. for (size_t i = 0; i < candidates->size; ++i) {
  6671. cum_sum += candidates->data[i].p;
6672. // Check if the running sum is at least p or if we have kept at least min_keep tokens
6673. // we set the last index to i+1 to indicate that the current token should be included in the set
  6674. if (cum_sum >= p && i + 1 >= min_keep) {
  6675. last_idx = i + 1;
  6676. break;
  6677. }
  6678. }
  6679. // Resize the output vector to keep only the top-p tokens
  6680. candidates->size = last_idx;
  6681. if (ctx) {
  6682. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6683. }
  6684. }
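// Min-p sampling (below): keep every token whose probability is at least p times the top token's
// probability (and always at least min_keep tokens). E.g. with p = 0.1 and a top probability of
// 0.6, tokens below 0.06 are dropped.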
  6685. void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  6686. if (p <= 0.0f || !candidates->size) {
  6687. return;
  6688. }
  6689. llama_sample_softmax(ctx, candidates);
  6690. const int64_t t_start_sample_us = ggml_time_us();
  6691. float scale = candidates->data[0].p; // scale by max prob
  6692. size_t i = 1; // first token always matches
  6693. for (; i < candidates->size; ++i) {
  6694. if (candidates->data[i].p < p * scale && i >= min_keep) {
  6695. break; // prob too small
  6696. }
  6697. }
  6698. // Resize the output vector to keep only the matching tokens
  6699. candidates->size = i;
  6700. if (ctx) {
  6701. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6702. }
  6703. }
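// Tail-free sampling (below): sort by probability, take the absolute second derivative of the
// probability curve, normalize it, and cut the tail once its cumulative weight exceeds z.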
  6704. void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
  6705. if (z >= 1.0f || candidates->size <= 2) {
  6706. return;
  6707. }
  6708. llama_sample_softmax(nullptr, candidates);
  6709. const int64_t t_start_sample_us = ggml_time_us();
  6710. // Compute the first and second derivatives
  6711. std::vector<float> first_derivatives(candidates->size - 1);
  6712. std::vector<float> second_derivatives(candidates->size - 2);
  6713. for (size_t i = 0; i < first_derivatives.size(); ++i) {
  6714. first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
  6715. }
  6716. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  6717. second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
  6718. }
  6719. // Calculate absolute value of second derivatives
  6720. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  6721. second_derivatives[i] = std::abs(second_derivatives[i]);
  6722. }
  6723. // Normalize the second derivatives
  6724. {
  6725. const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
  6726. if (second_derivatives_sum > 1e-6f) {
  6727. for (float & value : second_derivatives) {
  6728. value /= second_derivatives_sum;
  6729. }
  6730. } else {
  6731. for (float & value : second_derivatives) {
  6732. value = 1.0f / second_derivatives.size();
  6733. }
  6734. }
  6735. }
  6736. float cum_sum = 0.0f;
  6737. size_t last_idx = candidates->size;
  6738. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  6739. cum_sum += second_derivatives[i];
  6740. // Check if the running sum is greater than z or if we have kept at least min_keep tokens
  6741. if (cum_sum > z && i >= min_keep) {
  6742. last_idx = i;
  6743. break;
  6744. }
  6745. }
  6746. // Resize the output vector to keep only the tokens above the tail location
  6747. candidates->size = last_idx;
  6748. if (ctx) {
  6749. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6750. }
  6751. }
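// Locally typical sampling (below): compute the entropy H = -sum_i p_i * log(p_i), rank tokens by
// |-log(p_i) - H| (closest to the entropy first), and keep the smallest such set whose cumulative
// probability reaches p.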
  6752. void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  6753. // Reference implementation:
  6754. // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
  6755. if (p >= 1.0f) {
  6756. return;
  6757. }
  6758. // Compute the softmax of logits and calculate entropy
  6759. llama_sample_softmax(nullptr, candidates);
  6760. const int64_t t_start_sample_us = ggml_time_us();
  6761. float entropy = 0.0f;
  6762. for (size_t i = 0; i < candidates->size; ++i) {
  6763. entropy += -candidates->data[i].p * logf(candidates->data[i].p);
  6764. }
  6765. // Compute the absolute difference between negative log probability and entropy for each candidate
  6766. std::vector<float> shifted_scores;
  6767. for (size_t i = 0; i < candidates->size; ++i) {
  6768. float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
  6769. shifted_scores.push_back(shifted_score);
  6770. }
  6771. // Sort tokens based on the shifted_scores and their corresponding indices
  6772. std::vector<size_t> indices(candidates->size);
  6773. std::iota(indices.begin(), indices.end(), 0);
  6774. std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
  6775. return shifted_scores[a] < shifted_scores[b];
  6776. });
  6777. // Compute the cumulative probabilities
  6778. float cum_sum = 0.0f;
  6779. size_t last_idx = indices.size();
  6780. for (size_t i = 0; i < indices.size(); ++i) {
  6781. size_t idx = indices[i];
  6782. cum_sum += candidates->data[idx].p;
6783. // Check if the running sum is greater than p or if we have kept at least min_keep tokens
  6784. if (cum_sum > p && i >= min_keep - 1) {
  6785. last_idx = i + 1;
  6786. break;
  6787. }
  6788. }
  6789. // Resize the output vector to keep only the locally typical tokens
  6790. std::vector<llama_token_data> new_candidates;
  6791. for (size_t i = 0; i < last_idx; ++i) {
  6792. size_t idx = indices[i];
  6793. new_candidates.push_back(candidates->data[idx]);
  6794. }
  6795. // Replace the data in candidates with the new_candidates data
  6796. std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
  6797. candidates->size = new_candidates.size();
  6798. candidates->sorted = false;
  6799. if (ctx) {
  6800. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6801. }
  6802. }
  6803. void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  6804. const int64_t t_start_sample_us = ggml_time_us();
  6805. for (size_t i = 0; i < candidates_p->size; ++i) {
  6806. candidates_p->data[i].logit /= temp;
  6807. }
  6808. if (ctx) {
  6809. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6810. }
  6811. }
  6812. void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  6813. llama_sample_temp(ctx, candidates_p, temp);
  6814. }
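// Repetition/frequency/presence penalties (below): for every candidate seen in the last
// penalty_last_n tokens, the logit is scaled by penalty_repeat (divided if positive, multiplied if
// negative) and then reduced by count * penalty_freq + (count > 0) * penalty_present.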
  6815. void llama_sample_repetition_penalties(
  6816. struct llama_context * ctx,
  6817. llama_token_data_array * candidates,
  6818. const llama_token * last_tokens,
  6819. size_t penalty_last_n,
  6820. float penalty_repeat,
  6821. float penalty_freq,
  6822. float penalty_present) {
  6823. if (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)) {
  6824. return;
  6825. }
  6826. const int64_t t_start_sample_us = ggml_time_us();
  6827. // Create a frequency map to count occurrences of each token in last_tokens
  6828. std::unordered_map<llama_token, int> token_count;
  6829. for (size_t i = 0; i < penalty_last_n; ++i) {
  6830. token_count[last_tokens[i]]++;
  6831. }
  6832. // Apply frequency and presence penalties to the candidates
  6833. for (size_t i = 0; i < candidates->size; ++i) {
  6834. const auto token_iter = token_count.find(candidates->data[i].id);
  6835. if (token_iter == token_count.end()) {
  6836. continue;
  6837. }
  6838. const int count = token_iter->second;
6839. // The academic publication that described this technique only divided by the penalty, but that would make tokens with negative logits more likely, which is clearly wrong.
6840. // The common fix is to multiply negative logits by the penalty instead of dividing them.
  6841. if (candidates->data[i].logit <= 0) {
  6842. candidates->data[i].logit *= penalty_repeat;
  6843. } else {
  6844. candidates->data[i].logit /= penalty_repeat;
  6845. }
  6846. candidates->data[i].logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;
  6847. }
  6848. candidates->sorted = false;
  6849. if (ctx) {
  6850. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6851. }
  6852. }
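// Usage sketch (illustrative only; not part of the library): penalize the most
// recent generated tokens before sampling. The vector, the 64-token window and
// the penalty values below are arbitrary examples.
static void example_apply_penalties(struct llama_context * ctx,
                                    llama_token_data_array * cur_p,
                                    const std::vector<llama_token> & prev) {
    const size_t n = std::min<size_t>(prev.size(), 64);
    llama_sample_repetition_penalties(ctx, cur_p,
        prev.data() + prev.size() - n, n,
        1.10f,   // penalty_repeat:  divide positive / multiply negative logits of seen tokens
        0.02f,   // penalty_freq:    subtract count * penalty_freq
        0.00f);  // penalty_present: subtract penalty_present once if the token was seen at all
}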
  6853. void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
  6854. GGML_ASSERT(ctx);
  6855. const int64_t t_start_sample_us = ggml_time_us();
  6856. bool allow_eos = false;
  6857. for (const auto & stack : grammar->stacks) {
  6858. if (stack.empty()) {
  6859. allow_eos = true;
  6860. break;
  6861. }
  6862. }
  6863. const llama_token eos = llama_token_eos(&ctx->model);
  6864. std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
  6865. candidates_decoded.reserve(candidates->size);
  6866. std::vector<llama_grammar_candidate> candidates_grammar;
  6867. candidates_grammar.reserve(candidates->size);
  6868. for (size_t i = 0; i < candidates->size; ++i) {
  6869. const llama_token id = candidates->data[i].id;
  6870. const std::string piece = llama_token_to_piece(ctx, id);
  6871. if (id == eos) {
  6872. if (!allow_eos) {
  6873. candidates->data[i].logit = -INFINITY;
  6874. }
  6875. } else if (piece.empty() || piece[0] == 0) {
  6876. candidates->data[i].logit = -INFINITY;
  6877. } else {
  6878. candidates_decoded.push_back(decode_utf8(piece, grammar->partial_utf8));
  6879. candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
  6880. }
  6881. }
  6882. const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
  6883. for (const auto & reject : rejects) {
  6884. candidates->data[reject.index].logit = -INFINITY;
  6885. }
  6886. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6887. }
  6888. static void llama_log_softmax(float * array, size_t size) {
  6889. float max_l = *std::max_element(array, array + size);
  6890. float sum = 0.f;
  6891. for (size_t i = 0; i < size; ++i) {
  6892. float p = expf(array[i] - max_l);
  6893. sum += p;
  6894. array[i] = p;
  6895. }
  6896. for (size_t i = 0; i < size; ++i) {
  6897. array[i] = logf(array[i] / sum);
  6898. }
  6899. }
  6900. void llama_sample_classifier_free_guidance(
  6901. struct llama_context * ctx,
  6902. llama_token_data_array * candidates,
  6903. struct llama_context * guidance_ctx,
  6904. float scale) {
  6905. int64_t t_start_sample_us = ggml_time_us();
  6906. GGML_ASSERT(ctx);
  6907. auto n_vocab = llama_n_vocab(llama_get_model(ctx));
  6908. GGML_ASSERT(n_vocab == (int)candidates->size);
  6909. GGML_ASSERT(!candidates->sorted);
  6910. std::vector<float> logits_base;
  6911. logits_base.reserve(candidates->size);
  6912. for (size_t i = 0; i < candidates->size; ++i) {
  6913. logits_base.push_back(candidates->data[i].logit);
  6914. }
  6915. llama_log_softmax(logits_base.data(), candidates->size);
  6916. float* logits_guidance = llama_get_logits(guidance_ctx);
  6917. llama_log_softmax(logits_guidance, n_vocab);
  6918. for (int i = 0; i < n_vocab; ++i) {
  6919. float logit_guidance = logits_guidance[i];
  6920. float logit_base = logits_base[i];
  6921. candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance;
  6922. }
  6923. if (ctx) {
  6924. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6925. }
  6926. }
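// Usage sketch (illustrative only; not part of the library): both contexts must
// have been evaluated up to the same position -- `ctx` on the real prompt and
// `guidance_ctx` on the negative/unconditional prompt -- before the logits are
// mixed. The candidate array must be unsorted and cover the full vocabulary.
// The scale of 1.5f is an arbitrary example; scale == 1.0f leaves the base
// logits unchanged.
static void example_apply_cfg(struct llama_context * ctx,
                              struct llama_context * guidance_ctx,
                              llama_token_data_array * cur_p) {
    llama_sample_classifier_free_guidance(ctx, cur_p, guidance_ctx, 1.5f);
}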
  6927. llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int32_t m, float * mu) {
  6928. GGML_ASSERT(ctx);
  6929. auto N = float(llama_n_vocab(llama_get_model(ctx)));
  6930. int64_t t_start_sample_us;
  6931. t_start_sample_us = ggml_time_us();
  6932. llama_sample_softmax(nullptr, candidates);
  6933. // Estimate s_hat using the most probable m tokens
  6934. float s_hat = 0.0;
  6935. float sum_ti_bi = 0.0;
  6936. float sum_ti_sq = 0.0;
  6937. for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
  6938. float t_i = logf(float(i + 2) / float(i + 1));
  6939. float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
  6940. sum_ti_bi += t_i * b_i;
  6941. sum_ti_sq += t_i * t_i;
  6942. }
  6943. s_hat = sum_ti_bi / sum_ti_sq;
  6944. // Compute k from the estimated s_hat and target surprise value
  6945. float epsilon_hat = s_hat - 1;
  6946. float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
  6947. // Sample the next word X using top-k sampling
  6948. llama_sample_top_k(nullptr, candidates, int(k), 1);
  6949. if (ctx) {
  6950. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6951. }
  6952. llama_token X = llama_sample_token(ctx, candidates);
  6953. t_start_sample_us = ggml_time_us();
  6954. // Compute error as the difference between observed surprise and target surprise value
  6955. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  6956. return candidate.id == X;
  6957. }));
  6958. float observed_surprise = -log2f(candidates->data[X_idx].p);
  6959. float e = observed_surprise - tau;
  6960. // Update mu using the learning rate and error
  6961. *mu = *mu - eta * e;
  6962. if (ctx) {
  6963. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6964. }
  6965. return X;
  6966. }
  6967. llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
  6968. int64_t t_start_sample_us;
  6969. t_start_sample_us = ggml_time_us();
  6970. llama_sample_softmax(ctx, candidates);
  6971. // Truncate the words with surprise values greater than mu
  6972. candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  6973. return -log2f(candidate.p) > *mu;
  6974. }));
  6975. if (candidates->size == 0) {
  6976. candidates->size = 1;
  6977. }
  6978. if (ctx) {
  6979. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6980. }
  6981. // Normalize the probabilities of the remaining words
  6982. llama_sample_softmax(ctx, candidates);
  6983. // Sample the next word X from the remaining words
  6984. llama_token X = llama_sample_token(ctx, candidates);
  6985. t_start_sample_us = ggml_time_us();
  6986. // Compute error as the difference between observed surprise and target surprise value
  6987. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  6988. return candidate.id == X;
  6989. }));
  6990. float observed_surprise = -log2f(candidates->data[X_idx].p);
  6991. float e = observed_surprise - tau;
  6992. // Update mu using the learning rate and error
  6993. *mu = *mu - eta * e;
  6994. if (ctx) {
  6995. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  6996. }
  6997. return X;
  6998. }
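// Usage sketch (illustrative only; not part of the library): mirostat keeps a
// running estimate `mu` of the maximum allowed surprise. By convention it is
// seeded with 2*tau and must persist across sampling steps; the tau/eta values
// below are arbitrary examples.
static llama_token example_sample_mirostat_v2(struct llama_context * ctx,
                                              llama_token_data_array * cur_p,
                                              float * mu /* persistent state, init to 2*tau */) {
    const float tau = 5.0f;  // target surprise (cross-entropy) in bits
    const float eta = 0.1f;  // learning rate for the mu update
    return llama_sample_token_mirostat_v2(ctx, cur_p, tau, eta, mu);
}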
  6999. llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
  7000. const int64_t t_start_sample_us = ggml_time_us();
  7001. // Find max element
  7002. auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  7003. return a.logit < b.logit;
  7004. });
  7005. llama_token result = max_iter->id;
  7006. if (ctx) {
  7007. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7008. ctx->n_sample++;
  7009. }
  7010. return result;
  7011. }
  7012. llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
  7013. GGML_ASSERT(ctx);
  7014. const int64_t t_start_sample_us = ggml_time_us();
  7015. llama_sample_softmax(nullptr, candidates);
  7016. std::vector<float> probs;
  7017. probs.reserve(candidates->size);
  7018. for (size_t i = 0; i < candidates->size; ++i) {
  7019. probs.push_back(candidates->data[i].p);
  7020. }
  7021. std::discrete_distribution<> dist(probs.begin(), probs.end());
  7022. auto & rng = ctx->rng;
  7023. int idx = dist(rng);
  7024. llama_token result = candidates->data[idx].id;
  7025. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7026. ctx->n_sample++;
  7027. return result;
  7028. }
  7029. void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
  7030. const int64_t t_start_sample_us = ggml_time_us();
  7031. if (token == llama_token_eos(&ctx->model)) {
  7032. for (const auto & stack : grammar->stacks) {
  7033. if (stack.empty()) {
  7034. return;
  7035. }
  7036. }
  7037. GGML_ASSERT(false);
  7038. }
  7039. const std::string piece = llama_token_to_piece(ctx, token);
  7040. // Note terminating 0 in decoded string
  7041. const auto decoded = decode_utf8(piece, grammar->partial_utf8);
  7042. const auto & code_points = decoded.first;
  7043. for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
  7044. grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
  7045. }
  7046. grammar->partial_utf8 = decoded.second;
  7047. GGML_ASSERT(!grammar->stacks.empty());
  7048. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7049. }
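// Usage sketch (illustrative only; not part of the library): grammar-constrained
// sampling is a two-step handshake -- mask the candidates against the grammar
// before sampling, then feed the chosen token back so the grammar state advances.
static llama_token example_sample_with_grammar(struct llama_context * ctx,
                                               llama_token_data_array * cur_p,
                                               struct llama_grammar * grammar) {
    llama_sample_grammar(ctx, cur_p, grammar);        // set logits of disallowed tokens to -INFINITY
    llama_token tok = llama_sample_token(ctx, cur_p); // draw from what remains
    llama_grammar_accept_token(ctx, grammar, tok);    // advance the grammar stacks
    return tok;
}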
  7050. //
  7051. // Beam search
  7052. //
  7053. struct llama_beam {
  7054. std::vector<llama_token> tokens;
  7055. float p; // Cumulative beam probability (renormalized relative to all beams)
  7056. bool eob; // Initialize end-of-beam to false. Callback sets this to true.
  7057. // Sort beams by probability. In case of ties, prefer beams at eob.
  7058. bool operator<(const llama_beam & rhs) const {
  7059. return std::make_pair(p, eob) < std::make_pair(rhs.p, rhs.eob);
  7060. }
  7061. // Shift off first n tokens and discard them.
  7062. void shift_tokens(const size_t n) {
  7063. if (n) {
  7064. std::copy(tokens.begin() + n, tokens.end(), tokens.begin());
  7065. tokens.resize(tokens.size() - n);
  7066. }
  7067. }
  7068. llama_beam_view view() const { return {tokens.data(), tokens.size(), p, eob}; }
  7069. };
  7070. // A struct for calculating logit-related info.
  7071. struct llama_logit_info {
  7072. const float * const logits;
  7073. const int n_vocab;
  7074. const float max_l;
  7075. const float normalizer;
  7076. struct sum_exp {
  7077. float max_l;
  7078. float operator()(float sum, float l) const { return sum + std::exp(l - max_l); }
  7079. };
  7080. llama_logit_info(llama_context * ctx)
  7081. : logits(llama_get_logits(ctx))
  7082. , n_vocab(llama_n_vocab(llama_get_model(ctx)))
  7083. , max_l(*std::max_element(logits, logits + n_vocab))
  7084. , normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
  7085. { }
  7086. llama_token_data get_token_data(const llama_token token_id) const {
  7087. constexpr auto p = std::numeric_limits<float>::quiet_NaN(); // never used
  7088. return {token_id, logits[token_id], p};
  7089. }
  7090. // Return top k token_data by logit.
  7091. std::vector<llama_token_data> top_k(size_t k) {
  7092. std::vector<llama_token_data> min_heap; // min-heap by logit
  7093. const llama_token k_min = std::min(static_cast<llama_token>(k), n_vocab);
  7094. min_heap.reserve(k_min);
  7095. for (llama_token token_id = 0 ; token_id < k_min ; ++token_id) {
  7096. min_heap.push_back(get_token_data(token_id));
  7097. }
  7098. auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; };
  7099. std::make_heap(min_heap.begin(), min_heap.end(), comp);
  7100. for (llama_token token_id = k_min ; token_id < n_vocab ; ++token_id) {
  7101. if (min_heap.front().logit < logits[token_id]) {
  7102. std::pop_heap(min_heap.begin(), min_heap.end(), comp);
  7103. min_heap.back().id = token_id;
  7104. min_heap.back().logit = logits[token_id];
  7105. std::push_heap(min_heap.begin(), min_heap.end(), comp);
  7106. }
  7107. }
  7108. return min_heap;
  7109. }
  7110. float probability_from_logit(float logit) const {
  7111. return normalizer * std::exp(logit - max_l);
  7112. }
  7113. };
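// Note: probability_from_logit() above is a numerically stable softmax,
// p(i) = exp(l_i - max_l) / sum_j exp(l_j - max_l), with the denominator
// precomputed once as `normalizer` so that expanding a beam's top-k tokens
// costs only one exp() per candidate.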
  7114. struct llama_beam_search_data {
  7115. llama_context * ctx;
  7116. size_t n_beams;
  7117. int n_past;
  7118. int n_predict;
  7119. std::vector<llama_beam> beams;
  7120. std::vector<llama_beam> next_beams;
  7121. // Re-calculated on each loop iteration
  7122. size_t common_prefix_length;
  7123. // Used to communicate to/from callback on beams state.
  7124. std::vector<llama_beam_view> beam_views;
  7125. llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict)
  7126. : ctx(ctx)
  7127. , n_beams(n_beams)
  7128. , n_past(n_past)
  7129. , n_predict(n_predict)
  7130. , beam_views(n_beams) {
  7131. beams.reserve(n_beams);
  7132. next_beams.reserve(n_beams);
  7133. }
  7134. // Collapse beams to a single beam given by index.
  7135. void collapse_beams(const size_t beam_idx) {
  7136. if (0u < beam_idx) {
  7137. std::swap(beams[0], beams[beam_idx]);
  7138. }
  7139. beams.resize(1);
  7140. }
  7141. // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
  7142. // The repetitive patterns below reflect the 2 stages of heaps:
  7143. // * Gather elements until the vector is full, then call std::make_heap() on it.
  7144. // * If the heap is full and a new element is found that should be included, pop the
  7145. // least element to the back(), replace it with the new, then push it into the heap.
  7146. void fill_next_beams_by_top_probabilities(llama_beam & beam) {
  7147. // Min-heaps use a greater-than comparator.
  7148. const auto comp = [](const llama_beam & a, const llama_beam & b) { return a.p > b.p; };
  7149. if (beam.eob) {
  7150. // beam is at end-of-sentence, so just copy it to next_beams if its probability is high enough.
  7151. if (next_beams.size() < n_beams) {
  7152. next_beams.push_back(std::move(beam));
  7153. if (next_beams.size() == n_beams) {
  7154. std::make_heap(next_beams.begin(), next_beams.end(), comp);
  7155. }
  7156. } else if (next_beams.front().p < beam.p) {
  7157. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  7158. next_beams.back() = std::move(beam);
  7159. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  7160. }
  7161. } else {
  7162. // beam is not at end-of-sentence, so branch with next top_k tokens.
  7163. if (!beam.tokens.empty()) {
  7164. llama_decode(ctx, llama_batch_get_one(beam.tokens.data(), beam.tokens.size(), n_past, 0));
  7165. }
  7166. llama_logit_info logit_info(ctx);
  7167. std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);
  7168. size_t i=0;
  7169. if (next_beams.size() < n_beams) {
  7170. for (; next_beams.size() < n_beams ; ++i) {
  7171. llama_beam next_beam = beam;
  7172. next_beam.tokens.push_back(next_tokens[i].id);
  7173. next_beam.p *= logit_info.probability_from_logit(next_tokens[i].logit);
  7174. next_beams.push_back(std::move(next_beam));
  7175. }
  7176. std::make_heap(next_beams.begin(), next_beams.end(), comp);
  7177. } else {
  7178. for (; next_beams.front().p == 0.0f ; ++i) {
  7179. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  7180. next_beams.back() = beam;
  7181. next_beams.back().tokens.push_back(next_tokens[i].id);
  7182. next_beams.back().p *= logit_info.probability_from_logit(next_tokens[i].logit);
  7183. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  7184. }
  7185. }
  7186. for (; i < n_beams ; ++i) {
  7187. const float next_p = beam.p * logit_info.probability_from_logit(next_tokens[i].logit);
  7188. if (next_beams.front().p < next_p) {
  7189. std::pop_heap(next_beams.begin(), next_beams.end(), comp);
  7190. next_beams.back() = beam;
  7191. next_beams.back().tokens.push_back(next_tokens[i].id);
  7192. next_beams.back().p = next_p;
  7193. std::push_heap(next_beams.begin(), next_beams.end(), comp);
  7194. }
  7195. }
  7196. }
  7197. }
  7198. // Find common_prefix_length based on beams.
  7199. // Requires beams is not empty.
  7200. size_t find_common_prefix_length() {
  7201. size_t common_prefix_length = beams[0].tokens.size();
  7202. for (size_t i = 1 ; i < beams.size() ; ++i) {
  7203. common_prefix_length = std::min(common_prefix_length, beams[i].tokens.size());
  7204. for (size_t j = 0 ; j < common_prefix_length ; ++j) {
  7205. if (beams[0].tokens[j] != beams[i].tokens[j]) {
  7206. common_prefix_length = j;
  7207. break;
  7208. }
  7209. }
  7210. }
  7211. return common_prefix_length;
  7212. }
  7213. // Construct beams_state to send back to caller via the callback function.
  7214. // Side effect: set common_prefix_length = find_common_prefix_length();
  7215. llama_beams_state get_beams_state(const bool last_call) {
  7216. for (size_t i = 0 ; i < beams.size() ; ++i) {
  7217. beam_views[i] = beams[i].view();
  7218. }
  7219. common_prefix_length = find_common_prefix_length();
  7220. return {beam_views.data(), beams.size(), common_prefix_length, last_call};
  7221. }
  7222. // Loop:
  7223. // * while i < n_predict, AND
  7224. // * any of the beams have not yet reached end-of-beam (eob), AND
  7225. // * the highest probability beam(s) (plural in case of ties) are not at end-of-sentence
  7226. // (since all other beam probabilities can only decrease)
  7227. void loop(const llama_beam_search_callback_fn_t callback, void * const callback_data) {
  7228. beams.push_back({{}, 1.0f, false}); // Start with one empty beam w/ probability = 1.0 and !eob.
  7229. const auto not_eob = [](const llama_beam & beam) { return !beam.eob; };
  7230. for (int i = 0 ; i < n_predict && std::any_of(beams.begin(),beams.end(),not_eob) &&
  7231. !beams[top_beam_index()].eob ; ++i) {
  7232. callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
  7233. update_beams_from_beam_views(); // Update values (p,eob) that callback may have changed.
  7234. if (common_prefix_length) {
  7235. llama_decode(ctx, llama_batch_get_one(beams[0].tokens.data(), common_prefix_length, n_past, 0));
  7236. n_past += common_prefix_length;
  7237. }
  7238. // Zero-out next_beam probabilities to place them last in following min-heap.
  7239. std::for_each(next_beams.begin(), next_beams.end(), [](llama_beam & beam) { beam.p = 0.0f; });
  7240. for (llama_beam & beam : beams) {
  7241. beam.shift_tokens(common_prefix_length);
  7242. fill_next_beams_by_top_probabilities(beam);
  7243. }
  7244. // next_beams become the beams of next/final iteration. Swap them to re-use memory.
  7245. beams.swap(next_beams);
  7246. renormalize_beam_probabilities(beams);
  7247. }
  7248. collapse_beams(top_beam_index());
  7249. callback(callback_data, get_beams_state(true));
  7250. }
  7251. // As beams grow, the cumulative probabilities decrease.
  7252. // Renormalize them to avoid floating point underflow.
  7253. static void renormalize_beam_probabilities(std::vector<llama_beam> & beams) {
  7254. const auto sum_p = [](float sum, llama_beam & beam) { return sum + beam.p; };
  7255. const float inv_sum = 1.0f / std::accumulate(beams.begin(), beams.end(), 0.0f, sum_p);
  7256. std::for_each(beams.begin(), beams.end(), [=](llama_beam & beam) { beam.p *= inv_sum; });
  7257. }
  7258. // Assumes beams is non-empty. Uses llama_beam::operator<() for ordering.
  7259. size_t top_beam_index() {
  7260. return std::max_element(beams.begin(), beams.end()) - beams.begin();
  7261. }
  7262. // Copy (p,eob) for each beam which may have been changed by the callback.
  7263. void update_beams_from_beam_views() {
  7264. for (size_t i = 0 ; i < beams.size() ; ++i) {
  7265. beams[i].p = beam_views[i].p;
  7266. beams[i].eob = beam_views[i].eob;
  7267. }
  7268. }
  7269. };
  7270. void llama_beam_search(llama_context * ctx,
  7271. llama_beam_search_callback_fn_t callback, void * callback_data,
  7272. size_t n_beams, int n_past, int n_predict) {
  7273. assert(ctx);
  7274. const int64_t t_start_sample_us = ggml_time_us();
  7275. llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict);
  7276. beam_search_data.loop(callback, callback_data);
  7277. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  7278. ctx->n_sample++;
  7279. }
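// Usage sketch (illustrative only; not part of the library): the callback is
// invoked once per iteration and one final time with last_call == true. A
// minimal callback only needs to mark beams that reached end-of-sentence; the
// field names below assume the llama_beams_state / llama_beam_view structs
// declared in llama.h, which mirror the aggregates built above.
static void example_beam_callback(void * callback_data, llama_beams_state beams_state) {
    llama_context * lctx = (llama_context *) callback_data; // whatever was passed to llama_beam_search()
    for (size_t i = 0; i < beams_state.n_beams; ++i) {
        llama_beam_view & bv = beams_state.beam_views[i];
        if (!bv.eob && bv.n_tokens > 0 && bv.tokens[bv.n_tokens - 1] == llama_token_eos(llama_get_model(lctx))) {
            bv.eob = true; // tell the search this beam is finished
        }
    }
}
// e.g. llama_beam_search(ctx, example_beam_callback, ctx, /*n_beams=*/ 4, n_past, n_predict);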
  7280. //
  7281. // quantization
  7282. //
  7283. struct quantize_state_internal {
  7284. const llama_model & model;
  7285. const llama_model_quantize_params * params;
  7286. int n_attention_wv = 0;
  7287. int n_feed_forward_w2 = 0;
  7288. int i_attention_wv = 0;
  7289. int i_feed_forward_w2 = 0;
  7290. int n_k_quantized = 0;
  7291. int n_fallback = 0;
  7292. quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
  7293. : model(model)
  7294. , params(params)
  7295. {}
  7296. };
  7297. static void llama_convert_tensor_internal(
  7298. struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
  7299. const size_t nelements, const int nthread
  7300. ) {
  7301. if (output.size() < nelements) {
  7302. output.resize(nelements);
  7303. }
  7304. float * f32_output = (float *) output.data();
  7305. ggml_type_traits_t qtype;
  7306. if (ggml_is_quantized(tensor->type)) {
  7307. qtype = ggml_internal_get_type_traits(tensor->type);
  7308. if (qtype.to_float == NULL) {
  7309. throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
  7310. }
  7311. } else if (tensor->type != GGML_TYPE_F16) {
  7312. throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
  7313. }
  7314. if (nthread < 2) {
  7315. if (tensor->type == GGML_TYPE_F16) {
  7316. ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
  7317. } else if (ggml_is_quantized(tensor->type)) {
  7318. qtype.to_float(tensor->data, f32_output, nelements);
  7319. } else {
  7320. GGML_ASSERT(false); // unreachable
  7321. }
  7322. return;
  7323. }
  7324. size_t block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
  7325. size_t block_size_bytes = ggml_type_size(tensor->type);
  7326. GGML_ASSERT(nelements % block_size == 0);
  7327. size_t nblocks = nelements / block_size;
  7328. size_t blocks_per_thread = nblocks / nthread;
  7329. size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
  7330. size_t in_buff_offs = 0;
  7331. size_t out_buff_offs = 0;
  7332. for (int tnum = 0; tnum < nthread; tnum++) {
  7333. size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
  7334. size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
  7335. size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
  7336. auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
  7337. if (typ == GGML_TYPE_F16) {
  7338. ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
  7339. } else {
  7340. qtype.to_float(inbuf, outbuf, nels);
  7341. }
  7342. };
  7343. workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
  7344. in_buff_offs += thr_block_bytes;
  7345. out_buff_offs += thr_elems;
  7346. }
  7347. for (auto & w : workers) { w.join(); }
  7348. workers.clear();
  7349. }
  7350. static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
  7351. const std::string name = ggml_get_name(tensor);
  7352. // TODO: avoid hardcoded tensor names - use the TN_* constants
  7353. const llm_arch arch = qs.model.arch;
  7354. const auto tn = LLM_TN(arch);
  7355. auto use_more_bits = [](int i_layer, int num_layers) -> bool {
  7356. return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
  7357. };
  7358. if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
  7359. int nx = tensor->ne[0];
  7360. if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
  7361. new_type = GGML_TYPE_Q8_0;
  7362. }
  7363. else if (new_type != GGML_TYPE_Q8_0) {
  7364. new_type = GGML_TYPE_Q6_K;
  7365. }
  7366. } else if (name.find("attn_v.weight") != std::string::npos) {
  7367. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  7368. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  7369. new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  7370. }
  7371. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  7372. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  7373. use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
  7374. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
  7375. else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
  7376. (qs.i_attention_wv < qs.n_attention_wv/8 || qs.i_attention_wv >= 7*qs.n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
  7377. if (qs.model.type == MODEL_70B) {
  7378. // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
  7379. // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
  7380. // nearly negligible increase in model size by quantizing this tensor with more bits:
  7381. if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
  7382. }
  7383. if (qs.model.hparams.n_expert == 8) {
  7384. // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
  7385. // TODO: explore better strategies
  7386. new_type = GGML_TYPE_Q8_0;
  7387. }
  7388. ++qs.i_attention_wv;
  7389. } else if (name.find("attn_k.weight") != std::string::npos) {
  7390. if (qs.model.hparams.n_expert == 8) {
  7391. // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
  7392. // TODO: explore better strategies
  7393. new_type = GGML_TYPE_Q8_0;
  7394. }
  7395. } else if (name.find("ffn_down.weight") != std::string::npos) {
  7396. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  7397. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  7398. new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q5_K
  7399. : arch != LLM_ARCH_FALCON || use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q4_K
  7400. : GGML_TYPE_Q3_K;
  7401. }
  7402. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
  7403. new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
  7404. }
  7405. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
  7406. if (arch == LLM_ARCH_FALCON) {
  7407. new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q6_K :
  7408. use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  7409. } else {
  7410. if (use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
  7411. }
  7412. }
  7413. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
  7414. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && qs.i_feed_forward_w2 < 4) {
  7415. new_type = GGML_TYPE_Q5_K;
  7416. }
  7417. ++qs.i_feed_forward_w2;
  7418. } else if (name.find("attn_output.weight") != std::string::npos) {
  7419. if (arch != LLM_ARCH_FALCON) {
  7420. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
  7421. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) new_type = GGML_TYPE_Q4_K;
  7422. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  7423. } else {
  7424. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  7425. }
  7426. }
  7427. else if (name.find("attn_qkv.weight") != std::string::npos) {
  7428. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  7429. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
  7430. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
  7431. }
  7432. else if (name.find("ffn_gate.weight") != std::string::npos || name.find("ffn_up.weight") != std::string::npos) {
  7433. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  7434. }
  7435. // This can be used to reduce the size of the Q5_K_S model.
  7436. // The associated PPL increase is fully in line with the size reduction
  7437. //else {
  7438. // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
  7439. //}
  7440. bool convert_incompatible_tensor = false;
  7441. if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
  7442. new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
  7443. int nx = tensor->ne[0];
  7444. int ny = tensor->ne[1];
  7445. if (nx % QK_K != 0) {
  7446. LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
  7447. convert_incompatible_tensor = true;
  7448. } else {
  7449. ++qs.n_k_quantized;
  7450. }
  7451. }
  7452. if (convert_incompatible_tensor) {
  7453. switch (new_type) {
  7454. case GGML_TYPE_Q2_K: new_type = GGML_TYPE_Q4_0; break;
  7455. case GGML_TYPE_Q3_K: new_type = GGML_TYPE_Q4_1; break;
  7456. case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
  7457. case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
  7458. case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
  7459. default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
  7460. }
  7461. LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
  7462. ++qs.n_fallback;
  7463. }
  7464. return new_type;
  7465. }
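// Worked example (illustrative): for num_layers = 32, use_more_bits() above
// selects layers 0-3 (first eighth), layers 28-31 (last eighth) and every third
// layer in between starting at layer 6 (6, 9, 12, ...), so roughly a third of
// the middle layers receive the higher-bit quantization type.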
  7466. static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
  7467. ggml_type quantized_type;
  7468. llama_ftype ftype = params->ftype;
  7469. switch (params->ftype) {
  7470. case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
  7471. case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
  7472. case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
  7473. case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
  7474. case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
  7475. case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
  7476. case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
  7477. // K-quants
  7478. case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
  7479. case LLAMA_FTYPE_MOSTLY_Q3_K_S:
  7480. case LLAMA_FTYPE_MOSTLY_Q3_K_M:
  7481. case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
  7482. case LLAMA_FTYPE_MOSTLY_Q4_K_S:
  7483. case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
  7484. case LLAMA_FTYPE_MOSTLY_Q5_K_S:
  7485. case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
  7486. case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
  7487. case LLAMA_FTYPE_MOSTLY_IQ2_XXS:quantized_type = GGML_TYPE_IQ2_XXS; break;
  7488. default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
  7489. }
  7490. int nthread = params->nthread;
  7491. if (nthread <= 0) {
  7492. nthread = std::thread::hardware_concurrency();
  7493. }
7494. // mmap consistently increases speed on Linux, and also increases speed on Windows with a
7495. // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
  7496. #if defined(__linux__) || defined(_WIN32)
  7497. constexpr bool use_mmap = true;
  7498. #else
  7499. constexpr bool use_mmap = false;
  7500. #endif
  7501. llama_model_loader ml(fname_inp, use_mmap, NULL);
  7502. ml.init_mapping(false); // no prefetching?
  7503. llama_model model;
  7504. llm_load_arch(ml, model);
  7505. llm_load_hparams(ml, model);
  7506. struct quantize_state_internal qs(model, params);
  7507. if (params->only_copy) {
  7508. ftype = model.ftype;
  7509. }
  7510. const size_t align = GGUF_DEFAULT_ALIGNMENT;
  7511. struct gguf_context * ctx_out = gguf_init_empty();
  7512. // copy the KV pairs from the input file
  7513. gguf_set_kv (ctx_out, ml.ctx_gguf);
  7514. gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
  7515. gguf_set_val_u32(ctx_out, "general.file_type", ftype);
  7516. for (int i = 0; i < ml.n_tensors; ++i) {
  7517. struct ggml_tensor * meta = ml.get_tensor_meta(i);
  7518. const std::string name = ggml_get_name(meta);
  7519. // TODO: avoid hardcoded tensor names - use the TN_* constants
  7520. if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
  7521. ++qs.n_attention_wv;
  7522. }
  7523. else if (name.find("ffn_down.weight") != std::string::npos) {
  7524. ++qs.n_feed_forward_w2;
  7525. }
  7526. }
  7527. if (qs.n_attention_wv != qs.n_feed_forward_w2 || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) {
  7528. LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_feed_forward_w2 = %d, hparams.n_layer = %d\n",
  7529. __func__, qs.n_attention_wv, qs.n_feed_forward_w2, model.hparams.n_layer);
  7530. }
  7531. size_t total_size_org = 0;
  7532. size_t total_size_new = 0;
  7533. std::vector<int64_t> hist_all(1 << 4, 0);
  7534. std::vector<std::thread> workers;
  7535. workers.reserve(nthread);
  7536. std::mutex mutex;
  7537. int idx = 0;
  7538. std::vector<no_init<uint8_t>> read_data;
  7539. std::vector<no_init<uint8_t>> work;
  7540. std::vector<no_init<float>> f32_conv_buf;
7541. // populate the original tensors so that we get an initial metadata layout
  7542. for (int i = 0; i < ml.n_tensors; ++i) {
  7543. struct ggml_tensor * meta = ml.get_tensor_meta(i);
  7544. gguf_add_tensor(ctx_out, meta);
  7545. }
  7546. std::ofstream fout(fname_out, std::ios::binary);
  7547. fout.exceptions(std::ofstream::failbit); // fail fast on write errors
  7548. const size_t meta_size = gguf_get_meta_size(ctx_out);
  7549. LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size);
  7550. // placeholder for the meta data
  7551. ::zeros(fout, meta_size);
  7552. for (int i = 0; i < ml.n_tensors; ++i) {
  7553. struct ggml_tensor * tensor = ml.get_tensor_meta(i);
  7554. const std::string name = ggml_get_name(tensor);
  7555. if (!ml.use_mmap) {
  7556. if (read_data.size() < ggml_nbytes(tensor)) {
  7557. read_data.resize(ggml_nbytes(tensor));
  7558. }
  7559. tensor->data = read_data.data();
  7560. }
  7561. ml.load_data_for(tensor);
  7562. LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
  7563. ++idx, ml.n_tensors,
  7564. ggml_get_name(tensor),
  7565. llama_format_tensor_shape(tensor).c_str(),
  7566. ggml_type_name(tensor->type));
  7567. // This used to be a regex, but <regex> has an extreme cost to compile times.
  7568. bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
  7569. // quantize only 2D tensors
  7570. quantize &= (ggml_n_dims(tensor) == 2);
  7571. quantize &= params->quantize_output_tensor || name != "output.weight";
  7572. quantize &= !params->only_copy;
  7573. // do not quantize expert gating tensors
  7574. quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
  7575. enum ggml_type new_type;
  7576. void * new_data;
  7577. size_t new_size;
  7578. if (quantize) {
  7579. new_type = quantized_type;
  7580. if (!params->pure) {
  7581. new_type = get_k_quant_type(qs, new_type, tensor, ftype);
  7582. }
  7583. // If we've decided to quantize to the same type the tensor is already
  7584. // in then there's nothing to do.
  7585. quantize = tensor->type != new_type;
  7586. }
  7587. if (!quantize) {
  7588. new_type = tensor->type;
  7589. new_data = tensor->data;
  7590. new_size = ggml_nbytes(tensor);
  7591. LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
  7592. } else {
  7593. const size_t nelements = ggml_nelements(tensor);
  7594. float * f32_data;
  7595. if (tensor->type == GGML_TYPE_F32) {
  7596. f32_data = (float *) tensor->data;
  7597. } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
  7598. throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
  7599. } else {
  7600. llama_convert_tensor_internal(tensor, f32_conv_buf, workers, nelements, nthread);
  7601. f32_data = (float *) f32_conv_buf.data();
  7602. }
  7603. LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
  7604. fflush(stdout);
  7605. if (work.size() < nelements * 4) {
  7606. work.resize(nelements * 4); // upper bound on size
  7607. }
  7608. new_data = work.data();
  7609. std::array<int64_t, 1 << 4> hist_cur = {};
  7610. static const int chunk_size = 32 * 512;
  7611. const int nchunk = (nelements + chunk_size - 1)/chunk_size;
  7612. const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
  7613. if (nthread_use < 2) {
  7614. new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
  7615. } else {
  7616. size_t counter = 0;
  7617. new_size = 0;
  7618. auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() {
  7619. std::array<int64_t, 1 << 4> local_hist = {};
  7620. size_t local_size = 0;
  7621. while (true) {
  7622. std::unique_lock<std::mutex> lock(mutex);
  7623. size_t first = counter; counter += chunk_size;
  7624. if (first >= nelements) {
  7625. if (local_size > 0) {
  7626. for (int j=0; j<int(local_hist.size()); ++j) {
  7627. hist_cur[j] += local_hist[j];
  7628. }
  7629. new_size += local_size;
  7630. }
  7631. break;
  7632. }
  7633. lock.unlock();
  7634. size_t last = std::min(nelements, first + chunk_size);
  7635. local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
  7636. }
  7637. };
  7638. for (int it = 0; it < nthread_use - 1; ++it) {
  7639. workers.emplace_back(compute);
  7640. }
  7641. compute();
  7642. for (auto & w : workers) { w.join(); }
  7643. workers.clear();
  7644. }
  7645. LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
  7646. int64_t tot_count = 0;
  7647. for (size_t i = 0; i < hist_cur.size(); i++) {
  7648. hist_all[i] += hist_cur[i];
  7649. tot_count += hist_cur[i];
  7650. }
  7651. if (tot_count > 0) {
  7652. for (size_t i = 0; i < hist_cur.size(); i++) {
  7653. LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
  7654. }
  7655. }
  7656. LLAMA_LOG_INFO("\n");
  7657. }
  7658. total_size_org += ggml_nbytes(tensor);
  7659. total_size_new += new_size;
  7660. // update the gguf meta data as we go
  7661. gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
  7662. gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
  7663. // write tensor data + padding
  7664. fout.write((const char *) new_data, new_size);
  7665. zeros(fout, GGML_PAD(new_size, align) - new_size);
  7666. }
  7667. // go back to beginning of file and write the updated meta data
  7668. {
  7669. fout.seekp(0);
  7670. std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
  7671. gguf_get_meta_data(ctx_out, data.data());
  7672. fout.write((const char *) data.data(), data.size());
  7673. }
  7674. fout.close();
  7675. gguf_free(ctx_out);
  7676. LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
  7677. LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
  7678. // print histogram for all tensors
  7679. {
  7680. int64_t sum_all = 0;
  7681. for (size_t i = 0; i < hist_all.size(); i++) {
  7682. sum_all += hist_all[i];
  7683. }
  7684. if (sum_all > 0) {
  7685. LLAMA_LOG_INFO("%s: hist: ", __func__);
  7686. for (size_t i = 0; i < hist_all.size(); i++) {
  7687. LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
  7688. }
  7689. LLAMA_LOG_INFO("\n");
  7690. }
  7691. }
  7692. if (qs.n_fallback > 0) {
  7693. LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n",
  7694. __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
  7695. }
  7696. }
  7697. static int llama_apply_lora_from_file_internal(
  7698. const struct llama_model & model, const char * path_lora, float scale, const char * path_base_model, int n_threads
  7699. ) {
  7700. LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
  7701. const int64_t t_start_lora_us = ggml_time_us();
  7702. llama_file fin(path_lora, "rb");
  7703. // verify magic and version
  7704. {
  7705. uint32_t magic = fin.read_u32();
  7706. if (magic != LLAMA_FILE_MAGIC_GGLA) {
  7707. LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
  7708. return 1;
  7709. }
  7710. uint32_t format_version = fin.read_u32();
  7711. if (format_version != 1) {
  7712. LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
  7713. return 1;
  7714. }
  7715. }
  7716. int32_t lora_r = fin.read_u32();
  7717. int32_t lora_alpha = fin.read_u32();
  7718. float scaling = scale * (float)lora_alpha / (float)lora_r;
  7719. LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
  7720. // create a name -> tensor map of the model to accelerate lookups
  7721. // find the max tensor size to estimate the required temporary buffer size
  7722. size_t max_tensor_size = 0;
  7723. std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
  7724. for (const auto & kv : model.tensors_by_name) {
  7725. model_tensors.insert(kv);
  7726. size_t f32_size = ggml_nelements(kv.second) * sizeof(float);
  7727. max_tensor_size = std::max(max_tensor_size, f32_size);
  7728. }
  7729. // create a temporary ggml context to store the lora tensors
  7730. // TODO: use ggml-alloc
  7731. size_t lora_ctx_size = max_tensor_size * 3;
  7732. LLAMA_LOG_INFO("%s: allocating %.f MB for lora temporary buffer\n", __func__, lora_ctx_size / 1024.0 / 1024.0);
  7733. std::vector<uint8_t> lora_buf(lora_ctx_size);
  7734. struct ggml_init_params params;
  7735. params.mem_size = lora_buf.size();
  7736. params.mem_buffer = lora_buf.data();
  7737. params.no_alloc = false;
  7738. using unique_context = std::unique_ptr<ggml_context, decltype(&ggml_free)>;
  7739. unique_context lora_ctx(nullptr, ggml_free);
  7740. lora_ctx.reset(ggml_init(params));
  7741. std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
  7742. // load base model
  7743. std::unique_ptr<llama_model_loader> ml;
  7744. if (path_base_model) {
  7745. LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
  7746. ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr));
  7747. ml->init_mapping(false); // no prefetching
  7748. }
  7749. // read tensors and apply
  7750. bool warned = false;
  7751. int n_tensors = 0;
  7752. std::vector<uint8_t> work_buffer;
  7753. while (true) {
  7754. if (fin.tell() == fin.size) {
  7755. // eof
  7756. break;
  7757. }
  7758. int32_t n_dims;
  7759. int32_t name_len;
  7760. int32_t ftype;
  7761. fin.read_raw(&n_dims, sizeof(n_dims));
  7762. fin.read_raw(&name_len, sizeof(name_len));
  7763. fin.read_raw(&ftype, sizeof(ftype));
  7764. if (n_dims != 1 && n_dims != 2) {
  7765. LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
  7766. return 1;
  7767. }
  7768. int32_t ne[2] = { 1, 1 };
  7769. for (int i = 0; i < n_dims; ++i) {
  7770. fin.read_raw(&ne[i], sizeof(ne[i]));
  7771. }
  7772. std::string name;
  7773. {
  7774. GGML_ASSERT(name_len <= 1024);
  7775. char buf[1024];
  7776. fin.read_raw(buf, name_len);
  7777. name = std::string(buf, name_len);
  7778. }
  7779. // check for lora suffix and get the type of tensor
  7780. const std::string lora_suffix = ".lora";
  7781. size_t pos = name.rfind(lora_suffix);
  7782. if (pos == std::string::npos) {
  7783. LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
  7784. return 1;
  7785. }
  7786. std::string lora_type = name.substr(pos + lora_suffix.length());
  7787. std::string base_name = name;
  7788. base_name.erase(pos);
  7789. // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(), base_name.c_str(), lora_type.c_str());
  7790. if (model_tensors.find(base_name) == model_tensors.end()) {
  7791. LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
  7792. return 1;
  7793. }
  7794. // create ggml tensor
  7795. ggml_type wtype;
  7796. switch (ftype) {
  7797. case 0: wtype = GGML_TYPE_F32; break;
  7798. case 1: wtype = GGML_TYPE_F16; break;
  7799. default:
  7800. {
  7801. LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
  7802. __func__, ftype);
  7803. return false;
  7804. }
  7805. }
  7806. ggml_tensor * lora_tensor = ggml_new_tensor_2d(lora_ctx.get(), wtype, ne[0], ne[1]);
  7807. ggml_set_name(lora_tensor, name.c_str());
  7808. // load tensor data
  7809. size_t offset = fin.tell();
  7810. size_t tensor_data_size = ggml_nbytes(lora_tensor);
  7811. offset = (offset + 31) & -32;
  7812. fin.seek(offset, SEEK_SET);
  7813. fin.read_raw(lora_tensor->data, tensor_data_size);
  7814. lora_tensors[name] = lora_tensor;
  7815. // check if we have both A and B tensors and apply
  7816. if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
  7817. lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
  7818. ggml_tensor * dest_t = model_tensors[base_name];
  7819. offload_func_t offload_func = ggml_offload_nop;
  7820. offload_func_t offload_func_force_inplace = ggml_offload_nop;
  7821. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  7822. if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
  7823. if (dest_t->type != GGML_TYPE_F16) {
  7824. throw std::runtime_error(format(
  7825. "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models. dest_t->type: %d", __func__, dest_t->type));
  7826. }
  7827. offload_func = ggml_cuda_assign_buffers;
  7828. offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
  7829. }
  7830. #endif // GGML_USE_CUBLAS
  7831. ggml_tensor * base_t;
  7832. if (ml) {
  7833. struct gguf_context * ctx_gguf = ml->ctx_gguf;
  7834. // load from base model
  7835. if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) {
  7836. LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
  7837. return 1;
  7838. }
  7839. base_t = ml->get_tensor_meta(base_name.c_str());
  7840. ml->load_data_for(base_t);
  7841. } else {
  7842. base_t = dest_t;
  7843. }
  7844. if (ggml_is_quantized(base_t->type)) {
  7845. if (!warned) {
  7846. LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
  7847. "use a f16 or f32 base model with --lora-base\n", __func__);
  7848. warned = true;
  7849. }
  7850. }
  7851. ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
  7852. GGML_ASSERT(loraA->type == GGML_TYPE_F32);
  7853. ggml_set_name(loraA, "loraA");
  7854. ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
  7855. GGML_ASSERT(loraB->type == GGML_TYPE_F32);
  7856. ggml_set_name(loraB, "loraB");
  7857. if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
  7858. LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
  7859. " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
  7860. return 1;
  7861. }
  7862. // w = w + BA*s
  7863. ggml_tensor * BA = ggml_mul_mat(lora_ctx.get(), loraA, loraB);
  7864. offload_func(BA);
  7865. ggml_set_name(BA, "BA");
  7866. if (scaling != 1.0f) {
  7867. BA = ggml_scale_inplace(lora_ctx.get(), BA, scaling);
  7868. offload_func(BA);
  7869. ggml_set_name(BA, "BA_scaled");
  7870. }
  7871. ggml_tensor * r;
  7872. if (base_t == dest_t) {
  7873. r = ggml_add_inplace(lora_ctx.get(), dest_t, BA);
  7874. offload_func_force_inplace(r);
  7875. ggml_set_name(r, "r_add_inplace");
  7876. }
  7877. else {
  7878. r = ggml_add(lora_ctx.get(), base_t, BA);
  7879. offload_func(r);
  7880. ggml_set_name(r, "r_add");
  7881. r = ggml_cpy(lora_ctx.get(), r, dest_t);
  7882. offload_func(r);
  7883. ggml_set_name(r, "r_cpy");
  7884. }
  7885. struct ggml_cgraph * gf = ggml_new_graph(lora_ctx.get());
  7886. ggml_build_forward_expand(gf, r);
  7887. ggml_graph_compute_helper(work_buffer, gf, n_threads);
  7888. // the tensors in the adapter must be sorted such that loraA and loraB of the same tensor are next to each other
  7889. GGML_ASSERT(lora_tensors.size() == 2);
  7890. // we won't need these tensors again, reset the context to save memory
  7891. lora_ctx.reset(ggml_init(params));
  7892. lora_tensors.clear();
  7893. n_tensors++;
  7894. if (n_tensors % 4 == 0) {
  7895. LLAMA_LOG_INFO(".");
  7896. }
  7897. }
  7898. }
  7899. const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
  7900. LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
  7901. return 0;
  7902. }
  7903. //
  7904. // interface implementation
  7905. //
  7906. struct llama_model_params llama_model_default_params() {
  7907. struct llama_model_params result = {
  7908. /*.n_gpu_layers =*/ 0,
  7909. /*.main_gpu =*/ 0,
  7910. /*.tensor_split =*/ nullptr,
  7911. /*.progress_callback =*/ nullptr,
  7912. /*.progress_callback_user_data =*/ nullptr,
  7913. /*.kv_overrides =*/ nullptr,
  7914. /*.vocab_only =*/ false,
  7915. /*.use_mmap =*/ true,
  7916. /*.use_mlock =*/ false,
  7917. };
  7918. #ifdef GGML_USE_METAL
  7919. result.n_gpu_layers = 1;
  7920. #endif
  7921. return result;
  7922. }
  7923. struct llama_context_params llama_context_default_params() {
  7924. struct llama_context_params result = {
  7925. /*.seed =*/ LLAMA_DEFAULT_SEED,
  7926. /*.n_ctx =*/ 512,
  7927. /*.n_batch =*/ 512,
  7928. /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
  7929. /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
  7930. /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_UNSPECIFIED,
  7931. /*.rope_freq_base =*/ 0.0f,
  7932. /*.rope_freq_scale =*/ 0.0f,
  7933. /*.yarn_ext_factor =*/ -1.0f,
  7934. /*.yarn_attn_factor =*/ 1.0f,
  7935. /*.yarn_beta_fast =*/ 32.0f,
  7936. /*.yarn_beta_slow =*/ 1.0f,
  7937. /*.yarn_orig_ctx =*/ 0,
  7938. /*.type_k =*/ GGML_TYPE_F16,
  7939. /*.type_v =*/ GGML_TYPE_F16,
  7940. /*.mul_mat_q =*/ true,
  7941. /*.logits_all =*/ false,
  7942. /*.embedding =*/ false,
  7943. /*.offload_kqv =*/ true,
  7944. };
  7945. return result;
  7946. }
  7947. struct llama_model_quantize_params llama_model_quantize_default_params() {
  7948. struct llama_model_quantize_params result = {
  7949. /*.nthread =*/ 0,
  7950. /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
  7951. /*.allow_requantize =*/ false,
  7952. /*.quantize_output_tensor =*/ true,
  7953. /*.only_copy =*/ false,
  7954. /*.pure =*/ false,
  7955. };
  7956. return result;
  7957. }
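// Usage sketch (illustrative only; not part of the library): quantize a GGUF
// file to Q4_K_M starting from the defaults above. This assumes the public
// llama_model_quantize() wrapper declared in llama.h (returns 0 on success);
// the file paths are examples supplied by the caller.
static bool example_quantize(const char * fname_inp, const char * fname_out) {
    llama_model_quantize_params qparams = llama_model_quantize_default_params();
    qparams.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M; // target file type
    qparams.nthread = 0;                         // 0 -> use std::thread::hardware_concurrency()
    return llama_model_quantize(fname_inp, fname_out, &qparams) == 0;
}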
  7958. int32_t llama_max_devices(void) {
  7959. return LLAMA_MAX_DEVICES;
  7960. }
  7961. bool llama_mmap_supported(void) {
  7962. return llama_mmap::SUPPORTED;
  7963. }
  7964. bool llama_mlock_supported(void) {
  7965. return llama_mlock::SUPPORTED;
  7966. }
  7967. void llama_backend_init(bool numa) {
  7968. ggml_time_init();
  7969. // needed to initialize f16 tables
  7970. {
  7971. struct ggml_init_params params = { 0, NULL, false };
  7972. struct ggml_context * ctx = ggml_init(params);
  7973. ggml_free(ctx);
  7974. }
  7975. if (numa) {
  7976. ggml_numa_init();
  7977. }
  7978. #ifdef GGML_USE_MPI
  7979. ggml_mpi_backend_init();
  7980. #endif
  7981. }
  7982. void llama_backend_free(void) {
  7983. #ifdef GGML_USE_MPI
  7984. ggml_mpi_backend_free();
  7985. #endif
  7986. }
  7987. int64_t llama_time_us(void) {
  7988. return ggml_time_us();
  7989. }
  7990. struct llama_model * llama_load_model_from_file(
  7991. const char * path_model,
  7992. struct llama_model_params params) {
  7993. ggml_time_init();
  7994. llama_model * model = new llama_model;
  7995. unsigned cur_percentage = 0;
  7996. if (params.progress_callback == NULL) {
  7997. params.progress_callback_user_data = &cur_percentage;
  7998. params.progress_callback = [](float progress, void * ctx) {
  7999. unsigned * cur_percentage_p = (unsigned *) ctx;
  8000. unsigned percentage = (unsigned) (100 * progress);
  8001. while (percentage > *cur_percentage_p) {
  8002. *cur_percentage_p = percentage;
  8003. LLAMA_LOG_INFO(".");
  8004. if (percentage >= 100) {
  8005. LLAMA_LOG_INFO("\n");
  8006. }
  8007. }
  8008. return true;
  8009. };
  8010. }
  8011. int status = llama_model_load(path_model, *model, params);
  8012. GGML_ASSERT(status <= 0);
  8013. if (status < 0) {
  8014. if (status == -1) {
  8015. LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
  8016. } else if (status == -2) {
  8017. LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
  8018. }
  8019. delete model;
  8020. return nullptr;
  8021. }
  8022. return model;
  8023. }
  8024. void llama_free_model(struct llama_model * model) {
  8025. delete model;
  8026. }
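// Usage sketch (illustrative only; not part of the library): the minimal
// lifecycle around the functions above -- initialize the backend once per
// process, load a model, create a context, and tear everything down in reverse
// order. The context size is an arbitrary example; llama_free() is the context
// counterpart of llama_free_model() declared in llama.h.
static void example_model_lifecycle(const char * path_model) {
    llama_backend_init(/*numa*/ false);
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file(path_model, mparams);
    if (model) {
        llama_context_params cparams = llama_context_default_params();
        cparams.n_ctx = 2048; // 0 would mean "use the model's training context length"
        llama_context * lctx = llama_new_context_with_model(model, cparams);
        if (lctx) {
            // ... tokenize, llama_decode(), sample ...
            llama_free(lctx);
        }
        llama_free_model(model);
    }
    llama_backend_free();
}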
  8027. struct llama_context * llama_new_context_with_model(
  8028. struct llama_model * model,
  8029. struct llama_context_params params) {
  8030. if (!model) {
  8031. return nullptr;
  8032. }
  8033. llama_context * ctx = new llama_context(*model);
  8034. const auto & hparams = model->hparams;
  8035. auto & cparams = ctx->cparams;
  8036. cparams.n_batch = params.n_batch;
  8037. cparams.n_threads = params.n_threads;
  8038. cparams.n_threads_batch = params.n_threads_batch;
  8039. cparams.yarn_ext_factor = params.yarn_ext_factor;
  8040. cparams.yarn_attn_factor = params.yarn_attn_factor;
  8041. cparams.yarn_beta_fast = params.yarn_beta_fast;
  8042. cparams.yarn_beta_slow = params.yarn_beta_slow;
  8043. cparams.mul_mat_q = params.mul_mat_q;
  8044. cparams.offload_kqv = params.offload_kqv;
  8045. cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
  8046. cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
  8047. cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
  8048. cparams.n_yarn_orig_ctx = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
  8049. hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
  8050. hparams.n_ctx_train;
  8051. auto rope_scaling_type = params.rope_scaling_type;
  8052. if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) {
  8053. rope_scaling_type = hparams.rope_scaling_type_train;
  8054. }
  8055. if (rope_scaling_type == LLAMA_ROPE_SCALING_NONE) {
  8056. cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
  8057. }
  8058. if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
  8059. cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 1.0f : 0.0f;
  8060. }
  8061. if (params.seed == LLAMA_DEFAULT_SEED) {
  8062. params.seed = time(NULL);
  8063. }
  8064. LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
  8065. LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
  8066. LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
  8067. ctx->rng = std::mt19937(params.seed);
  8068. ctx->logits_all = params.logits_all;
  8069. const ggml_type type_k = params.type_k;
  8070. const ggml_type type_v = params.type_v;
  8071. GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0);
  8072. GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0);
  8073. // reserve memory for context buffers
  8074. if (!hparams.vocab_only) {
  8075. // initialize backend
  8076. #ifdef GGML_USE_METAL
  8077. if (model->n_gpu_layers > 0) {
  8078. ctx->backend = ggml_backend_metal_init();
  8079. if (ctx->backend == nullptr) {
  8080. LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
  8081. }
  8082. }
  8083. #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  8084. // for testing only
  8085. if (model->n_gpu_layers > 0) {
  8086. ctx->backend = ggml_backend_cuda_init(0);
  8087. if (ctx->backend == nullptr) {
  8088. LLAMA_LOG_ERROR("%s: failed to initialize CUDA backend\n", __func__);
  8089. }
  8090. }
  8091. #endif
  8092. if (ctx->backend == nullptr && ggml_backend_buffer_is_host(model->buf)) {
  8093. ctx->backend = ggml_backend_cpu_init();
  8094. if (ctx->backend == nullptr) {
  8095. LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__);
  8096. }
  8097. }
  8098. if (ctx->backend == nullptr) {
  8099. LLAMA_LOG_ERROR("%s: failed to initialize a backend\n", __func__);
  8100. delete ctx;
  8101. return nullptr;
  8102. }
  8103. if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, type_k, type_v,
  8104. cparams.n_ctx, model->n_gpu_layers, cparams.offload_kqv)) {
  8105. LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
  8106. llama_free(ctx);
  8107. return nullptr;
  8108. }
  8109. {
  8110. size_t memory_size_k = 0;
  8111. size_t memory_size_v = 0;
  8112. for (auto & k : ctx->kv_self.k_l) {
  8113. memory_size_k += ggml_nbytes(k);
  8114. }
  8115. for (auto & v : ctx->kv_self.v_l) {
  8116. memory_size_v += ggml_nbytes(v);
  8117. }
  8118. LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
  8119. (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
  8120. ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
  8121. ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
  8122. }
  8123. // resized during inference
  8124. if (params.logits_all) {
  8125. ctx->logits.reserve(cparams.n_ctx*hparams.n_vocab);
  8126. } else {
  8127. ctx->logits.reserve(hparams.n_vocab);
  8128. }
  8129. if (params.embedding){
  8130. ctx->embedding.resize(hparams.n_embd);
  8131. }
  8132. {
  8133. // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
  8134. ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead());
  8135. // create measure allocator
  8136. ctx->alloc = ggml_allocr_new_measure_from_backend(ctx->backend);
  8137. // build worst-case graph
  8138. int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch);
  8139. int n_past = cparams.n_ctx - n_tokens;
  8140. llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
  8141. ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0));
  8142. // measure memory requirements for the graph
  8143. size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf);
  8144. LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MiB\n", __func__, (ctx->buf_compute_meta.size() + alloc_size) / 1024.0 / 1024.0);
  8145. // create allocator again with exact memory requirements
  8146. ggml_allocr_free(ctx->alloc);
  8147. ctx->buf_alloc = ggml_backend_alloc_buffer(ctx->backend, alloc_size);
  8148. ctx->alloc = ggml_allocr_new_from_buffer(ctx->buf_alloc);
  8149. #if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
  8150. if (model->n_gpu_layers > 0) {
  8151. // the CPU buffer adds this padding in case the malloc buffer is not aligned, so we need to do the same for the GPU buffer, since we use the same offsets
  8152. ggml_cuda_set_scratch_size(alloc_size + 64);
  8153. LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MiB\n", __func__, alloc_size / 1024.0 / 1024.0);
  8154. // calculate total VRAM usage
  8155. auto add_tensor = [](const ggml_tensor * t, size_t & size) {
  8156. if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) {
  8157. size += ggml_nbytes(t);
  8158. }
  8159. };
  8160. size_t model_vram_size = 0;
  8161. for (const auto & kv : model->tensors_by_name) {
  8162. add_tensor(kv.second, model_vram_size);
  8163. }
  8164. size_t kv_vram_size = 0;
  8165. for (auto & k : ctx->kv_self.k_l) {
  8166. add_tensor(k, kv_vram_size);
  8167. }
  8168. for (auto & v : ctx->kv_self.v_l) {
  8169. add_tensor(v, kv_vram_size);
  8170. }
  8171. size_t ctx_vram_size = alloc_size + kv_vram_size;
  8172. size_t total_vram_size = model_vram_size + ctx_vram_size;
  8173. LLAMA_LOG_INFO("%s: total VRAM used: %.2f MiB (model: %.2f MiB, context: %.2f MiB)\n", __func__,
  8174. total_vram_size / 1024.0 / 1024.0,
  8175. model_vram_size / 1024.0 / 1024.0,
  8176. ctx_vram_size / 1024.0 / 1024.0);
  8177. }
  8178. #endif
  8179. }
  8180. }
  8181. #ifdef GGML_USE_MPI
  8182. ctx->ctx_mpi = ggml_mpi_init();
  8183. if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
  8184. // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
  8185. // TODO: needs fix after #3228
  8186. GGML_ASSERT(false && "not implemented");
  8187. //const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
  8188. //while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
  8189. llama_backend_free();
  8190. exit(1);
  8191. }
  8192. #endif
  8193. return ctx;
  8194. }
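
/** illustrative sketch (not part of the library): creating a context from a model file.
 *  This assumes the llama.h helpers llama_model_default_params(), llama_context_default_params()
 *  and llama_backend_init(bool) as they existed at this revision; the model path is a placeholder.
 *
 *     llama_backend_init(false);
 *
 *     llama_model_params mparams = llama_model_default_params();
 *     llama_model * model = llama_load_model_from_file("model.gguf", mparams);
 *
 *     llama_context_params cparams = llama_context_default_params();
 *     cparams.n_ctx = 2048;   // 0 would fall back to the model's n_ctx_train (see above)
 *     cparams.seed  = 42;     // LLAMA_DEFAULT_SEED would be replaced with time(NULL)
 *
 *     llama_context * lctx = llama_new_context_with_model(model, cparams);
 *     if (lctx == nullptr) {
 *         // backend or KV cache initialization failed
 *     }
 *
 *     llama_free(lctx);
 *     llama_free_model(model);
 *     llama_backend_free();
 */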
void llama_free(struct llama_context * ctx) {
    delete ctx;
}

const llama_model * llama_get_model(const struct llama_context * ctx) {
    return &ctx->model;
}

uint32_t llama_n_ctx(const struct llama_context * ctx) {
    return ctx->cparams.n_ctx;
}

uint32_t llama_n_batch(const struct llama_context * ctx) {
    return ctx->cparams.n_batch;
}

enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
    return model->vocab.type;
}

int32_t llama_n_vocab(const struct llama_model * model) {
    return model->vocab.id_to_token.size();
}

int32_t llama_n_ctx_train(const struct llama_model * model) {
    return model->hparams.n_ctx_train;
}

int32_t llama_n_embd(const struct llama_model * model) {
    return model->hparams.n_embd;
}

float llama_rope_freq_scale_train(const struct llama_model * model) {
    return model->hparams.rope_freq_scale_train;
}

int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
    const auto & it = model->gguf_kv.find(key);
    if (it == model->gguf_kv.end()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}

int32_t llama_model_meta_count(const struct llama_model * model) {
    return (int)model->gguf_kv.size();
}

int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->first.c_str());
}

int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}
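
/** illustrative sketch (not part of the library): enumerating GGUF metadata with the
 *  accessors above. `model` is assumed to be a loaded llama_model; the 256-byte buffers
 *  are an arbitrary size chosen for the example.
 *
 *     char key[256];
 *     char val[256];
 *     const int32_t n_kv = llama_model_meta_count(model);
 *     for (int32_t i = 0; i < n_kv; ++i) {
 *         if (llama_model_meta_key_by_index(model, i, key, sizeof(key)) >= 0 &&
 *             llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) >= 0) {
 *             printf("%s = %s\n", key, val);
 *         }
 *     }
 *
 *     // single lookup by key; returns -1 if the key is absent
 *     if (llama_model_meta_val_str(model, "general.name", val, sizeof(val)) >= 0) {
 *         printf("name: %s\n", val);
 *     }
 */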
int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
    return snprintf(buf, buf_size, "%s %s%s %s",
            llama_model_arch_name(model->arch).c_str(),
            model->hparams.n_expert > 0 ? (std::to_string(model->hparams.n_expert) + "x").c_str() : "",
            llama_model_type_name(model->type),
            llama_model_ftype_name(model->ftype).c_str());
}

uint64_t llama_model_size(const struct llama_model * model) {
    uint64_t size = 0;
    for (const auto & it : model->tensors_by_name) {
        size += ggml_nbytes(it.second);
    }
    return size;
}

uint64_t llama_model_n_params(const struct llama_model * model) {
    uint64_t nparams = 0;
    for (const auto & it : model->tensors_by_name) {
        nparams += ggml_nelements(it.second);
    }
    return nparams;
}
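
/** illustrative sketch (not part of the library): printing a one-line model summary
 *  from the three helpers above; `model` is a placeholder for a loaded llama_model.
 *
 *     char desc[128];
 *     llama_model_desc(model, desc, sizeof(desc));
 *     printf("%s | size: %.2f GiB | params: %.2f B\n",
 *            desc,
 *            llama_model_size(model) / 1024.0 / 1024.0 / 1024.0,
 *            llama_model_n_params(model) / 1e9);
 */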
struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
    return ggml_get_tensor(model->ctx, name);
}

uint32_t llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        const llama_model_quantize_params * params) {
    try {
        llama_model_quantize_internal(fname_inp, fname_out, params);
        return 0;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
        return 1;
    }
}
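
/** illustrative sketch (not part of the library): quantizing a GGUF file. This assumes
 *  llama_model_quantize_default_params() and the llama_ftype values declared in llama.h;
 *  the file names are placeholders.
 *
 *     llama_model_quantize_params qparams = llama_model_quantize_default_params();
 *     qparams.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M;
 *     qparams.nthread = 8;
 *
 *     if (llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams) != 0) {
 *         // quantization failed; details are reported through the log callback
 *     }
 */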
int32_t llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, float scale, const char * path_base_model, int32_t n_threads) {
    try {
        return llama_apply_lora_from_file_internal(ctx->model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

int32_t llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, float scale, const char * path_base_model, int32_t n_threads) {
    try {
        return llama_apply_lora_from_file_internal(*model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}
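
/** illustrative sketch (not part of the library): applying a LoRA adapter to a loaded
 *  model. The adapter path is a placeholder; passing NULL as path_base_model applies
 *  the adapter on top of the already-loaded weights.
 *
 *     // scale = 1.0f, path_base_model = NULL, n_threads = 4
 *     const int32_t err = llama_model_apply_lora_from_file(model, "lora-adapter.bin", 1.0f, NULL, 4);
 *     if (err != 0) {
 *         // failed to apply the adapter; see the log for details
 *     }
 */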
struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_max_seq) {
    struct llama_kv_cache_view result = {
        /*.n_cells            = */ 0,
        /*.n_max_seq          = */ n_max_seq,
        /*.token_count        = */ 0,
        /*.used_cells         = */ llama_get_kv_cache_used_cells(ctx),
        /*.max_contiguous     = */ 0,
        /*.max_contiguous_idx = */ -1,
        /*.cells              = */ nullptr,
        /*.cells_sequences    = */ nullptr,
    };
    return result;
}

void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
    if (view->cells != nullptr) {
        free(view->cells);
        view->cells = nullptr;
    }
    if (view->cells_sequences != nullptr) {
        free(view->cells_sequences);
        view->cells_sequences = nullptr;
    }
}

void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
    if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
        view->n_cells = int32_t(ctx->kv_self.size);
        void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
        view->cells = (struct llama_kv_cache_view_cell *)p;
        p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_max_seq * view->n_cells);
        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
        view->cells_sequences = (llama_seq_id *)p;
    }

    const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
    llama_kv_cache_view_cell * c_curr = view->cells;
    llama_seq_id * cs_curr = view->cells_sequences;
    int32_t used_cells = 0;
    int32_t token_count = 0;
    int32_t curr_contig_idx = -1;
    uint32_t max_contig = 0;
    int32_t max_contig_idx = -1;

    for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_max_seq) {
        const size_t curr_size = kv_cells[i].seq_id.size();
        token_count += curr_size;
        c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;

        if (curr_size > 0) {
            if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
                max_contig = i - curr_contig_idx;
                max_contig_idx = curr_contig_idx;
            }
            curr_contig_idx = -1;
        } else if (curr_contig_idx < 0) {
            curr_contig_idx = i;
        }

        int seq_idx = 0;
        for (const llama_seq_id it : kv_cells[i].seq_id) {
            if (seq_idx >= view->n_max_seq) {
                break;
            }
            cs_curr[seq_idx] = it;
            seq_idx++;
        }
        if (seq_idx != 0) {
            used_cells++;
        }
        for (; seq_idx < view->n_max_seq; seq_idx++) {
            cs_curr[seq_idx] = -1;
        }
    }
    if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
        max_contig_idx = curr_contig_idx;
        max_contig = kv_cells.size() - curr_contig_idx;
    }
    view->max_contiguous = max_contig;
    view->max_contiguous_idx = max_contig_idx;
    view->token_count = token_count;
    view->used_cells = used_cells;
    if (uint32_t(used_cells) != ctx->kv_self.used) {
        LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
            __func__, ctx->kv_self.used, used_cells);
    }
}
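
/** illustrative sketch (not part of the library): inspecting KV cache occupancy with the
 *  view API above (init once, update after decode calls, free when done). `lctx` is a
 *  placeholder for an existing llama_context.
 *
 *     llama_kv_cache_view view = llama_kv_cache_view_init(lctx, 4); // track up to 4 seq ids per cell
 *
 *     // ... after one or more llama_decode() calls ...
 *     llama_kv_cache_view_update(lctx, &view);
 *     printf("cells: %d, used: %d, tokens: %d, max contiguous free run: %d @ %d\n",
 *            view.n_cells, view.used_cells, view.token_count,
 *            view.max_contiguous, view.max_contiguous_idx);
 *
 *     llama_kv_cache_view_free(&view);
 */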
int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) {
    int result = 0;
    for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
        result += ctx->kv_self.cells[i].seq_id.size();
    }
    return result;
}

int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
    return ctx->kv_self.used;
}

void llama_kv_cache_clear(struct llama_context * ctx) {
    llama_kv_cache_clear(ctx->kv_self);
}

void llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
}

void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }
    llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
}

void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
    llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
}

void llama_kv_cache_seq_shift(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
    if (delta == 0) {
        return;
    }
    llama_kv_cache_seq_shift(ctx->kv_self, seq_id, p0, p1, delta);
}

void llama_kv_cache_seq_div(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    if (d == 1) {
        return;
    }
    llama_kv_cache_seq_div(ctx->kv_self, seq_id, p0, p1, d);
}

// Returns the *maximum* size of the state
size_t llama_get_state_size(const struct llama_context * ctx) {
    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size        = sizeof(size_t);
    const size_t s_rng             = LLAMA_MAX_RNG_STATE;
    const size_t s_logits_capacity = sizeof(size_t);
    const size_t s_logits_size     = sizeof(size_t);
    const size_t s_logits          = ctx->logits.capacity() * sizeof(float);
    const size_t s_embedding_size  = sizeof(size_t);
    const size_t s_embedding       = ctx->embedding.size() * sizeof(float);
    const size_t s_kv_size         = sizeof(size_t);
    const size_t s_kv_ntok         = sizeof(int);
    const size_t s_kv              = ggml_backend_buffer_get_size(ctx->kv_self.buf);

    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_logits_capacity
        + s_logits_size
        + s_logits
        + s_embedding_size
        + s_embedding
        + s_kv_size
        + s_kv_ntok
        + s_kv
    );

    return s_total;
}

// llama_context_data
struct llama_data_context {
    virtual void write(const void * src, size_t size) = 0;
    virtual size_t get_size_written() = 0;
    virtual ~llama_data_context() = default;
};

struct llama_data_buffer_context : llama_data_context {
    uint8_t * ptr;
    size_t size_written = 0;

    llama_data_buffer_context(uint8_t * p) : ptr(p) {}

    void write(const void * src, size_t size) override {
        memcpy(ptr, src, size);
        ptr += size;
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

struct llama_data_file_context : llama_data_context {
    llama_file * file;
    size_t size_written = 0;

    llama_data_file_context(llama_file * f) : file(f) {}

    void write(const void * src, size_t size) override {
        file->write_raw(src, size);
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};
/** copy state data into either a buffer or file depending on the passed in context
 *
 * file context:
 * llama_file file("/path", "wb");
 * llama_data_file_context data_ctx(&file);
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
 * buffer context:
 * std::vector<uint8_t> buf(max_size, 0);
 * llama_data_buffer_context data_ctx(buf.data());
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
*/
static void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
    // copy rng
    {
        std::stringstream rng_ss;
        rng_ss << ctx->rng;

        const size_t rng_size = rng_ss.str().size();
        char rng_buf[LLAMA_MAX_RNG_STATE];

        memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
        memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());

        data_ctx->write(&rng_size, sizeof(rng_size));
        data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE);
    }

    // copy logits
    {
        const size_t logits_cap = ctx->logits.capacity();
        const size_t logits_size = ctx->logits.size();

        data_ctx->write(&logits_cap, sizeof(logits_cap));
        data_ctx->write(&logits_size, sizeof(logits_size));

        if (logits_size) {
            data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
        }

        // If there is a gap between the size and the capacity, write padding
        size_t padding_size = (logits_cap - logits_size) * sizeof(float);
        if (padding_size > 0) {
            std::vector<uint8_t> padding(padding_size, 0); // Create a buffer filled with zeros
            data_ctx->write(padding.data(), padding_size);
        }
    }

    // copy embeddings
    {
        const size_t embedding_size = ctx->embedding.size();

        data_ctx->write(&embedding_size, sizeof(embedding_size));

        if (embedding_size) {
            data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
        }
    }

    // copy kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const auto & cparams = ctx->cparams;

        const auto n_layer      = hparams.n_layer;
        const auto n_embd_k_gqa = hparams.n_embd_k_gqa();
        const auto n_embd_v_gqa = hparams.n_embd_v_gqa();
        const auto n_ctx        = cparams.n_ctx;

        const size_t   kv_buf_size = ggml_backend_buffer_get_size(kv_self.buf);
        const uint32_t kv_head     = kv_self.head;
        const uint32_t kv_size     = kv_self.size;
        const uint32_t kv_used     = kv_self.used;

        data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
        data_ctx->write(&kv_head, sizeof(kv_head));
        data_ctx->write(&kv_size, sizeof(kv_size));
        data_ctx->write(&kv_used, sizeof(kv_used));

        if (kv_buf_size) {
            const size_t elt_size = ggml_element_size(kv_self.k_l[0]);

            ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
            ggml_cgraph * gf = ggml_new_graph(cpy_ctx);

            std::vector<struct ggml_tensor *> kout2d(n_layer);
            std::vector<struct ggml_tensor *> vout2d(n_layer);

            for (int il = 0; il < (int) n_layer; ++il) {
                kout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd_k_gqa, kv_head);
                vout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd_v_gqa);

                ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il],
                        n_embd_k_gqa, kv_head,
                        elt_size*n_embd_k_gqa, 0);

                ggml_tensor * v2d = ggml_view_2d(cpy_ctx, kv_self.v_l[il],
                        kv_head, n_embd_v_gqa,
                        elt_size*n_ctx, 0);

                ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k2d, kout2d[il]));
                ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v2d, vout2d[il]));
            }

            ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend);

            ggml_backend_graph_compute(ctx->backend, gf);

            std::vector<uint8_t> tmp_buf;
            for (int il = 0; il < (int) n_layer; ++il) {
                tmp_buf.resize(ggml_nbytes(kout2d[il]));
                ggml_backend_tensor_get(kout2d[il], tmp_buf.data(), 0, tmp_buf.size());
                data_ctx->write(tmp_buf.data(), tmp_buf.size());

                tmp_buf.resize(ggml_nbytes(vout2d[il]));
                ggml_backend_tensor_get(vout2d[il], tmp_buf.data(), 0, tmp_buf.size());
                data_ctx->write(tmp_buf.data(), tmp_buf.size());
            }

            ggml_free(cpy_ctx);

            ggml_backend_buffer_free(buf);
        }

        for (uint32_t i = 0; i < kv_size; ++i) {
            const auto & cell = kv_self.cells[i];

            const llama_pos pos         = cell.pos;
            const size_t    seq_id_size = cell.seq_id.size();

            data_ctx->write(&pos, sizeof(pos));
            data_ctx->write(&seq_id_size, sizeof(seq_id_size));

            for (auto seq_id : cell.seq_id) {
                data_ctx->write(&seq_id, sizeof(seq_id));
            }
        }
    }
}

size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
    llama_data_buffer_context data_ctx(dst);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return data_ctx.get_size_written();
}

// Sets the state reading from the specified source address
size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
    uint8_t * inp = src;

    // set rng
    {
        size_t rng_size;
        char rng_buf[LLAMA_MAX_RNG_STATE];

        memcpy(&rng_size, inp, sizeof(rng_size)); inp += sizeof(rng_size);
        memcpy(&rng_buf[0], inp, LLAMA_MAX_RNG_STATE); inp += LLAMA_MAX_RNG_STATE;

        std::stringstream rng_ss;
        rng_ss.str(std::string(&rng_buf[0], rng_size));
        rng_ss >> ctx->rng;

        GGML_ASSERT(!rng_ss.fail());
    }

    // set logits
    {
        size_t logits_cap;
        size_t logits_size;

        memcpy(&logits_cap, inp, sizeof(logits_cap)); inp += sizeof(logits_cap);
        memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);

        GGML_ASSERT(ctx->logits.capacity() == logits_cap);

        if (logits_size) {
            ctx->logits.resize(logits_size);
            memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
        }

        inp += logits_cap * sizeof(float);
    }

    // set embeddings
    {
        size_t embedding_size;

        memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);

        GGML_ASSERT(ctx->embedding.capacity() == embedding_size);

        if (embedding_size) {
            memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
            inp += embedding_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const auto & cparams = ctx->cparams;

        const int n_layer      = hparams.n_layer;
        const int n_embd_k_gqa = hparams.n_embd_k_gqa();
        const int n_embd_v_gqa = hparams.n_embd_v_gqa();
        const int n_ctx        = cparams.n_ctx;

        size_t   kv_buf_size;
        uint32_t kv_head;
        uint32_t kv_size;
        uint32_t kv_used;

        memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
        memcpy(&kv_head, inp, sizeof(kv_head)); inp += sizeof(kv_head);
        memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
        memcpy(&kv_used, inp, sizeof(kv_used)); inp += sizeof(kv_used);

        if (kv_buf_size) {
            GGML_ASSERT(ggml_backend_buffer_get_size(kv_self.buf) == kv_buf_size);

            const size_t elt_size = ggml_element_size(kv_self.k_l[0]);

            ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
            ggml_cgraph * gf = ggml_new_graph(cpy_ctx);

            std::vector<struct ggml_tensor *> kin2d(n_layer);
            std::vector<struct ggml_tensor *> vin2d(n_layer);

            for (int il = 0; il < n_layer; ++il) {
                kin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd_k_gqa, kv_head);
                vin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd_v_gqa);

                ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il],
                        n_embd_k_gqa, kv_head,
                        elt_size*n_embd_k_gqa, 0);

                ggml_tensor * v2d = ggml_view_2d(cpy_ctx, kv_self.v_l[il],
                        kv_head, n_embd_v_gqa,
                        elt_size*n_ctx, 0);

                ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin2d[il], k2d));
                ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin2d[il], v2d));
            }

            ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend);

            // load data into the tensors
            for (int il = 0; il < n_layer; ++il) {
                ggml_backend_tensor_set(kin2d[il], inp, 0, ggml_nbytes(kin2d[il]));
                inp += ggml_nbytes(kin2d[il]);

                ggml_backend_tensor_set(vin2d[il], inp, 0, ggml_nbytes(vin2d[il]));
                inp += ggml_nbytes(vin2d[il]);
            }

            ggml_backend_graph_compute(ctx->backend, gf);

            ggml_free(cpy_ctx);

            ggml_backend_buffer_free(buf);
        }

        ctx->kv_self.head = kv_head;
        ctx->kv_self.size = kv_size;
        ctx->kv_self.used = kv_used;

        ctx->kv_self.cells.resize(kv_size);

        for (uint32_t i = 0; i < kv_size; ++i) {
            llama_pos pos;
            size_t seq_id_size;

            memcpy(&pos, inp, sizeof(pos)); inp += sizeof(pos);
            memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);

            ctx->kv_self.cells[i].pos = pos;

            llama_seq_id seq_id;

            for (size_t j = 0; j < seq_id_size; ++j) {
                memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
                ctx->kv_self.cells[i].seq_id.insert(seq_id);
            }
        }
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_get_state_size(ctx);

    GGML_ASSERT(nread <= max_size);

    return nread;
}
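
/** illustrative sketch (not part of the library): snapshotting and restoring the full
 *  context state (RNG, logits, embeddings, KV cache) through a heap buffer. `lctx` is a
 *  placeholder for an existing llama_context.
 *
 *     const size_t max_size = llama_get_state_size(lctx);  // upper bound, see above
 *     std::vector<uint8_t> state(max_size);
 *     const size_t written = llama_copy_state_data(lctx, state.data());
 *
 *     // ... later, on a context created from the same model with the same parameters ...
 *     const size_t read = llama_set_state_data(lctx, state.data());
 *     // read <= max_size, and `written` and `read` cover the same serialized layout
 */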
static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_get_state_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_set_state_data(ctx, state_data.data());
    }

    return true;
}

bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
        return false;
    }
}

bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return true;
}
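
/** illustrative sketch (not part of the library): persisting a prompt plus context state
 *  to a session file and loading it back. The path is a placeholder and `tokens` is
 *  assumed to be the prompt that produced the current KV cache.
 *
 *     // save
 *     llama_save_session_file(lctx, "prompt.session", tokens.data(), tokens.size());
 *
 *     // load: the file must have been written for a model with matching hparams
 *     std::vector<llama_token> restored(llama_n_ctx(lctx));
 *     size_t n_restored = 0;
 *     if (llama_load_session_file(lctx, "prompt.session", restored.data(), restored.size(), &n_restored)) {
 *         restored.resize(n_restored);
 *     }
 */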
int llama_eval(
        struct llama_context * ctx,
        llama_token * tokens,
        int32_t n_tokens,
        int32_t n_past) {
    llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);

    const int ret = llama_decode_internal(*ctx, llama_batch_get_one(tokens, n_tokens, n_past, 0));
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

int llama_eval_embd(
        struct llama_context * ctx,
        float * embd,
        int32_t n_tokens,
        int32_t n_past) {
    llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);

    llama_batch batch = { n_tokens, nullptr, embd, nullptr, nullptr, nullptr, nullptr, n_past, 1, 0, };

    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch) {
    ctx->cparams.n_threads = n_threads;
    ctx->cparams.n_threads_batch = n_threads_batch;
}

struct llama_batch llama_batch_get_one(
        llama_token * tokens,
        int32_t n_tokens,
        llama_pos pos_0,
        llama_seq_id seq_id) {
    return {
        /*n_tokens   =*/ n_tokens,
        /*tokens     =*/ tokens,
        /*embd       =*/ nullptr,
        /*pos        =*/ nullptr,
        /*n_seq_id   =*/ nullptr,
        /*seq_id     =*/ nullptr,
        /*logits     =*/ nullptr,
        /*all_pos_0  =*/ pos_0,
        /*all_pos_1  =*/ 1,
        /*all_seq_id =*/ seq_id,
    };
}

struct llama_batch llama_batch_init(int32_t n_tokens, int32_t embd, int32_t n_seq_max) {
    llama_batch batch = { 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, 0, 0, };

    if (embd) {
        batch.embd = (float *) malloc(sizeof(float) * n_tokens * embd);
    } else {
        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens);
    }

    batch.pos      = (llama_pos *)     malloc(sizeof(llama_pos)      * n_tokens);
    batch.n_seq_id = (int32_t *)       malloc(sizeof(int32_t)        * n_tokens);
    batch.seq_id   = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * n_tokens);
    for (int i = 0; i < n_tokens; ++i) {
        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
    }
    batch.logits   = (int8_t *)        malloc(sizeof(int8_t)         * n_tokens);

    return batch;
}

void llama_batch_free(struct llama_batch batch) {
    if (batch.token)    free(batch.token);
    if (batch.embd)     free(batch.embd);
    if (batch.pos)      free(batch.pos);
    if (batch.n_seq_id) free(batch.n_seq_id);
    if (batch.seq_id) {
        for (int i = 0; i < batch.n_tokens; ++i) {
            free(batch.seq_id[i]);
        }
        free(batch.seq_id);
    }
    if (batch.logits)   free(batch.logits);
}

int32_t llama_decode(
        struct llama_context * ctx,
        struct llama_batch batch) {
    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}
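
/** illustrative sketch (not part of the library): decoding a tokenized prompt with an
 *  explicit batch and reading the logits of the last token. `lctx` and `prompt_tokens`
 *  are placeholders; n_ctx and n_batch are assumed to be large enough for the prompt.
 *
 *     const int n = (int) prompt_tokens.size();
 *     llama_batch batch = llama_batch_init(n, 0, 1);   // token batch, one seq id per token
 *     batch.n_tokens = n;
 *     for (int i = 0; i < n; ++i) {
 *         batch.token[i]     = prompt_tokens[i];
 *         batch.pos[i]       = i;
 *         batch.n_seq_id[i]  = 1;
 *         batch.seq_id[i][0] = 0;
 *         batch.logits[i]    = (i == n - 1);           // only request logits for the last token
 *     }
 *
 *     if (llama_decode(lctx, batch) == 0) {
 *         const float * logits = llama_get_logits_ith(lctx, n - 1);
 *         // pick the next token from `logits` (greedy argmax, sampling, ...)
 *     }
 *     llama_batch_free(batch);
 */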
float * llama_get_logits(struct llama_context * ctx) {
    return ctx->logits.data();
}

float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
    assert(ctx->logits_valid.at(i));
    return ctx->logits.data() + i*ctx->model.hparams.n_vocab;
}

float * llama_get_embeddings(struct llama_context * ctx) {
    return ctx->embedding.data();
}

const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].text.c_str();
}

float llama_token_get_score(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].score;
}

llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token) {
    return model->vocab.id_to_token[token].type;
}

llama_token llama_token_bos(const struct llama_model * model) {
    return model->vocab.special_bos_id;
}

llama_token llama_token_eos(const struct llama_model * model) {
    return model->vocab.special_eos_id;
}

llama_token llama_token_nl(const struct llama_model * model) {
    return model->vocab.linefeed_id;
}

int32_t llama_add_bos_token(const struct llama_model * model) {
    return model->vocab.special_add_bos;
}

int32_t llama_add_eos_token(const struct llama_model * model) {
    return model->vocab.special_add_eos;
}

llama_token llama_token_prefix(const struct llama_model * model) {
    return model->vocab.special_prefix_id;
}

llama_token llama_token_middle(const struct llama_model * model) {
    return model->vocab.special_middle_id;
}

llama_token llama_token_suffix(const struct llama_model * model) {
    return model->vocab.special_suffix_id;
}

llama_token llama_token_eot(const struct llama_model * model) {
    return model->vocab.special_eot_id;
}

int32_t llama_tokenize(
        const struct llama_model * model,
        const char * text,
        int32_t text_len,
        llama_token * tokens,
        int32_t n_max_tokens,
        bool add_bos,
        bool special) {
    auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_bos, special);

    if (n_max_tokens < (int) res.size()) {
        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}
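
/** illustrative sketch (not part of the library): the two-pass tokenization pattern —
 *  a negative return value is the required token count, so resize and retry.
 *
 *     const std::string text = "Hello world";
 *     std::vector<llama_token> toks(text.size() + 1);  // rough initial guess
 *     int32_t n = llama_tokenize(model, text.c_str(), (int32_t) text.size(),
 *                                toks.data(), (int32_t) toks.size(),
 *                                true, false);         // add_bos = true, special = false
 *     if (n < 0) {
 *         toks.resize(-n);
 *         n = llama_tokenize(model, text.c_str(), (int32_t) text.size(),
 *                            toks.data(), (int32_t) toks.size(), true, false);
 *     }
 *     toks.resize(n);
 */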
static std::string llama_decode_text(const std::string & text) {
    std::string decoded_text;
    auto unicode_sequences = codepoints_from_utf8(text);
    for (auto & unicode_sequence : unicode_sequences) {
        decoded_text += unicode_to_bytes_bpe(codepoint_to_utf8(unicode_sequence));
    }

    return decoded_text;
}

// does not write null-terminator to buf
int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length) {
    if (0 <= token && token < llama_n_vocab(model)) {
        switch (llama_vocab_get_type(model->vocab)) {
        case LLAMA_VOCAB_TYPE_SPM: {
            if (llama_is_normal_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                llama_unescape_whitespace(result);
                if (length < (int) result.length()) {
                    return -(int) result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
                if (length < 3) {
                    return -3;
                }
                memcpy(buf, "\xe2\x96\x85", 3);
                return 3;
            } else if (llama_is_control_token(model->vocab, token)) {
                ;
            } else if (llama_is_byte_token(model->vocab, token)) {
                if (length < 1) {
                    return -1;
                }
                buf[0] = llama_token_to_byte(model->vocab, token);
                return 1;
            } else {
                // TODO: for now we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                // GGML_ASSERT(false);
            }
            break;
        }
        case LLAMA_VOCAB_TYPE_BPE: {
            if (llama_is_normal_token(model->vocab, token)) {
                std::string result = model->vocab.id_to_token[token].text;
                result = llama_decode_text(result);
                if (length < (int) result.length()) {
                    return -(int) result.length();
                }
                memcpy(buf, result.c_str(), result.length());
                return result.length();
            } else if (llama_is_control_token(model->vocab, token)) {
                ;
            } else {
                // TODO: for now we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                // GGML_ASSERT(false);
            }
            break;
        }
        default:
            GGML_ASSERT(false);
        }
    }
    return 0;
}
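
/** illustrative sketch (not part of the library): converting a token id back to text with
 *  the same negative-length retry convention (note that no null terminator is written).
 *  `model` and `token` are placeholders.
 *
 *     std::string piece(8, '\0');
 *     int32_t n = llama_token_to_piece(model, token, &piece[0], (int32_t) piece.size());
 *     if (n < 0) {
 *         piece.resize(-n);
 *         n = llama_token_to_piece(model, token, &piece[0], (int32_t) piece.size());
 *     }
 *     piece.resize(n);
 */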
struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}

void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    LLAMA_LOG_INFO("\n");
    LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, timings.t_load_ms);
    LLAMA_LOG_INFO("%s: sample time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    LLAMA_LOG_INFO("%s: total time = %10.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX_VNNI = "    + std::to_string(ggml_cpu_has_avx_vnni())    + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";

    return s.c_str();
}

void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
    fprintf(stream, "\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "# Timings #\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "\n");
    fprintf(stream, "mst_eval: %.2f  # ms / token during generation\n",
            1.0e-3 * ctx->t_eval_us / ctx->n_eval);
    fprintf(stream, "mst_p_eval: %.2f  # ms / token during prompt processing\n",
            1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
    fprintf(stream, "mst_sample: %.2f  # ms / token during sampling\n",
            1.0e-3 * ctx->t_sample_us / ctx->n_sample);
    fprintf(stream, "n_eval: %d  # number of tokens generated (excluding the first one)\n", ctx->n_eval);
    fprintf(stream, "n_p_eval: %d  # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
    fprintf(stream, "n_sample: %d  # number of sampled tokens\n", ctx->n_sample);
    fprintf(stream, "t_eval_us: %" PRId64 "  # total microseconds spent generating tokens\n", ctx->t_eval_us);
    fprintf(stream, "t_load_us: %" PRId64 "  # total microseconds spent loading the model\n", ctx->t_load_us);
    fprintf(stream, "t_p_eval_us: %" PRId64 "  # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
    fprintf(stream, "t_sample_us: %" PRId64 "  # total microseconds spent sampling\n", ctx->t_sample_us);
    fprintf(stream, "ts_eval: %.2f  # tokens / second during generation\n",
            1.0e6 * ctx->n_eval / ctx->t_eval_us);
    fprintf(stream, "ts_p_eval: %.2f  # tokens / second during prompt processing\n",
            1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
    fprintf(stream, "ts_sample: %.2f  # tokens / second during sampling\n",
            1.0e6 * ctx->n_sample / ctx->t_sample_us);
}

// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
) {
    return ctx->model.tensors_by_name;
}

void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
#ifdef GGML_USE_METAL
    ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#endif
}
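
/** illustrative sketch (not part of the library): routing llama.cpp log output through a
 *  user callback, e.g. to keep only errors and write them to a file. The file name is a
 *  placeholder.
 *
 *     static void my_log(ggml_log_level level, const char * text, void * user_data) {
 *         FILE * f = (FILE *) user_data;
 *         if (level == GGML_LOG_LEVEL_ERROR) {
 *             fputs(text, f);
 *         }
 *     }
 *
 *     FILE * log_file = fopen("llama.log", "w");
 *     llama_log_set(my_log, log_file);   // pass nullptr, nullptr to restore the default callback
 */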
static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        char * buffer2 = new char[len+1];
        vsnprintf(buffer2, len+1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}

static void llama_log_internal(ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}

static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}