llama.cpp — 14,308 lines · 716 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919
#define LLAMA_API_INTERNAL
#include "llama.h"
#include "unicode.h"
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

#ifdef GGML_USE_RPC
#  include "ggml-rpc.h"
#endif

#ifdef GGML_USE_CUDA
#  include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#  include "ggml-opencl.h"
#elif defined(GGML_USE_VULKAN)
#  include "ggml-vulkan.h"
#elif defined(GGML_USE_SYCL)
#  include "ggml-sycl.h"
#elif defined(GGML_USE_KOMPUTE)
#  include "ggml-kompute.h"
#endif

#ifdef GGML_USE_METAL
#  include "ggml-metal.h"
#endif

#ifndef QK_K
#  ifdef GGML_QKK_64
#    define QK_K 64
#  else
#    define QK_K 256
#  endif
#endif
#ifdef __has_include
    #if __has_include(<unistd.h>)
        #include <unistd.h>
        #if defined(_POSIX_MAPPED_FILES)
            #include <sys/mman.h>
            #include <fcntl.h>
        #endif
        #if defined(_POSIX_MEMLOCK_RANGE)
            #include <sys/resource.h>
        #endif
    #endif
#endif

#if defined(_WIN32)
    #define WIN32_LEAN_AND_MEAN
    #ifndef NOMINMAX
        #define NOMINMAX
    #endif
    #include <windows.h>
    #ifndef PATH_MAX
        #define PATH_MAX MAX_PATH
    #endif
    #include <io.h>
#endif
#include <algorithm>
#include <array>
#include <cassert>
#include <cctype>
#include <cfloat>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <forward_list>
#include <fstream>
#include <functional>
#include <future>
#include <initializer_list>
#include <locale>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <queue>
#include <random>
#include <regex>
#include <set>
#include <sstream>
#include <thread>
#include <type_traits>
#include <unordered_map>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif

#define LLAMA_MAX_NODES   8192
#define LLAMA_MAX_EXPERTS 60
//
// logging
//

LLAMA_ATTRIBUTE_FORMAT(2, 3)
static void llama_log_internal(ggml_log_level level, const char * format, ...);
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)

//
// helpers
//

static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
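// illustrative examples (editorial sketch, not part of the upstream source):
// the lookup table maps the high nibble of a UTF-8 leading byte to the length
// of the encoded sequence; continuation bytes (0x80-0xBF) also report 1.
//
//   utf8_len('a')    == 1; // 0x61 >> 4 == 0x6 -> single-byte ASCII
//   utf8_len('\xC3') == 2; // 0xC -> start of a 2-byte sequence
//   utf8_len('\xE2') == 3; // 0xE -> start of a 3-byte sequence
//   utf8_len('\xF0') == 4; // 0xF -> start of a 4-byte sequence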
static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    std::string result;
    for (size_t pos = 0; ; pos += search.length()) {
        auto new_pos = s.find(search, pos);
        if (new_pos == std::string::npos) {
            result += s.substr(pos, s.size() - pos);
            break;
        }
        result += s.substr(pos, new_pos - pos) + replace;
        pos = new_pos;
    }
    s = std::move(result);
}
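// illustrative example (not from the upstream source):
//
//   std::string s = "hello_world_x";
//   replace_all(s, "_", " "); // s == "hello world x"
//
// the scan resumes search.length() past each match and builds into `result`
// rather than `s`, so matches are non-overlapping and a `replace` value that
// contains `search` cannot cause re-scanning of the replacement text.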
static bool is_float_close(float a, float b, float abs_tol) {
    // Check for non-negative tolerance
    if (abs_tol < 0.0) {
        throw std::invalid_argument("Tolerance must be non-negative");
    }

    // Exact equality check
    if (a == b) {
        return true;
    }

    // Check for infinities
    if (std::isinf(a) || std::isinf(b)) {
        return false;
    }

    // Regular comparison using the provided absolute tolerance
    return std::fabs(b - a) <= abs_tol;
}
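// illustrative behaviour (editorial sketch, not from the upstream source):
//
//   is_float_close(1.0f, 1.0f + 1e-6f, 1e-5f); // true  (within tolerance)
//   is_float_close(INFINITY, INFINITY, 1e-5f); // true  (caught by a == b)
//   is_float_close(INFINITY, 1e30f,    1e-5f); // false (isinf short-circuit)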
static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}
LLAMA_ATTRIBUTE_FORMAT(1, 2)
static std::string format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}
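// illustrative example (not from the upstream source): the first vsnprintf
// call with a NULL buffer only measures the formatted length; va_copy is
// needed because that measuring pass consumes the original va_list.
//
//   format("blk.%d.%s", 3, "attn_norm"); // returns "blk.3.attn_norm"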
//
// gguf constants (sync with gguf.py)
//

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GROK,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_REFACT,
    LLM_ARCH_BERT,
    LLM_ARCH_NOMIC_BERT,
    LLM_ARCH_JINA_BERT_V2,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_QWEN2,
    LLM_ARCH_QWEN2MOE,
    LLM_ARCH_PHI2,
    LLM_ARCH_PHI3,
    LLM_ARCH_PLAMO,
    LLM_ARCH_CODESHELL,
    LLM_ARCH_ORION,
    LLM_ARCH_INTERNLM2,
    LLM_ARCH_MINICPM,
    LLM_ARCH_GEMMA,
    LLM_ARCH_STARCODER2,
    LLM_ARCH_MAMBA,
    LLM_ARCH_XVERSE,
    LLM_ARCH_COMMAND_R,
    LLM_ARCH_DBRX,
    LLM_ARCH_OLMO,
    LLM_ARCH_UNKNOWN,
};
static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA, "llama" },
    { LLM_ARCH_FALCON, "falcon" },
    { LLM_ARCH_GROK, "grok" },
    { LLM_ARCH_GPT2, "gpt2" },
    { LLM_ARCH_GPTJ, "gptj" },
    { LLM_ARCH_GPTNEOX, "gptneox" },
    { LLM_ARCH_MPT, "mpt" },
    { LLM_ARCH_BAICHUAN, "baichuan" },
    { LLM_ARCH_STARCODER, "starcoder" },
    { LLM_ARCH_REFACT, "refact" },
    { LLM_ARCH_BERT, "bert" },
    { LLM_ARCH_NOMIC_BERT, "nomic-bert" },
    { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" },
    { LLM_ARCH_BLOOM, "bloom" },
    { LLM_ARCH_STABLELM, "stablelm" },
    { LLM_ARCH_QWEN, "qwen" },
    { LLM_ARCH_QWEN2, "qwen2" },
    { LLM_ARCH_QWEN2MOE, "qwen2moe" },
    { LLM_ARCH_PHI2, "phi2" },
    { LLM_ARCH_PHI3, "phi3" },
    { LLM_ARCH_PLAMO, "plamo" },
    { LLM_ARCH_CODESHELL, "codeshell" },
    { LLM_ARCH_ORION, "orion" },
    { LLM_ARCH_INTERNLM2, "internlm2" },
    { LLM_ARCH_MINICPM, "minicpm" },
    { LLM_ARCH_GEMMA, "gemma" },
    { LLM_ARCH_STARCODER2, "starcoder2" },
    { LLM_ARCH_MAMBA, "mamba" },
    { LLM_ARCH_XVERSE, "xverse" },
    { LLM_ARCH_COMMAND_R, "command-r" },
    { LLM_ARCH_DBRX, "dbrx" },
    { LLM_ARCH_OLMO, "olmo" },
    { LLM_ARCH_UNKNOWN, "(unknown)" },
};
enum llm_kv {
    LLM_KV_GENERAL_ARCHITECTURE,
    LLM_KV_GENERAL_QUANTIZATION_VERSION,
    LLM_KV_GENERAL_ALIGNMENT,
    LLM_KV_GENERAL_NAME,
    LLM_KV_GENERAL_AUTHOR,
    LLM_KV_GENERAL_VERSION,
    LLM_KV_GENERAL_URL,
    LLM_KV_GENERAL_DESCRIPTION,
    LLM_KV_GENERAL_LICENSE,
    LLM_KV_GENERAL_SOURCE_URL,
    LLM_KV_GENERAL_SOURCE_HF_REPO,
    LLM_KV_VOCAB_SIZE,
    LLM_KV_CONTEXT_LENGTH,
    LLM_KV_EMBEDDING_LENGTH,
    LLM_KV_BLOCK_COUNT,
    LLM_KV_FEED_FORWARD_LENGTH,
    LLM_KV_USE_PARALLEL_RESIDUAL,
    LLM_KV_TENSOR_DATA_LAYOUT,
    LLM_KV_EXPERT_COUNT,
    LLM_KV_EXPERT_USED_COUNT,
    LLM_KV_POOLING_TYPE,
    LLM_KV_LOGIT_SCALE,
    LLM_KV_ATTENTION_HEAD_COUNT,
    LLM_KV_ATTENTION_HEAD_COUNT_KV,
    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
    LLM_KV_ATTENTION_CLAMP_KQV,
    LLM_KV_ATTENTION_KEY_LENGTH,
    LLM_KV_ATTENTION_VALUE_LENGTH,
    LLM_KV_ATTENTION_LAYERNORM_EPS,
    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
    LLM_KV_ATTENTION_CAUSAL,
    LLM_KV_ROPE_DIMENSION_COUNT,
    LLM_KV_ROPE_FREQ_BASE,
    LLM_KV_ROPE_SCALE_LINEAR,
    LLM_KV_ROPE_SCALING_TYPE,
    LLM_KV_ROPE_SCALING_FACTOR,
    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
    LLM_KV_ROPE_SCALING_FINETUNED,
    LLM_KV_SPLIT_NO,
    LLM_KV_SPLIT_COUNT,
    LLM_KV_SPLIT_TENSORS_COUNT,
    LLM_KV_SSM_INNER_SIZE,
    LLM_KV_SSM_CONV_KERNEL,
    LLM_KV_SSM_STATE_SIZE,
    LLM_KV_SSM_TIME_STEP_RANK,
    LLM_KV_TOKENIZER_MODEL,
    LLM_KV_TOKENIZER_PRE,
    LLM_KV_TOKENIZER_LIST,
    LLM_KV_TOKENIZER_TOKEN_TYPE,
    LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
    LLM_KV_TOKENIZER_SCORES,
    LLM_KV_TOKENIZER_MERGES,
    LLM_KV_TOKENIZER_BOS_ID,
    LLM_KV_TOKENIZER_EOS_ID,
    LLM_KV_TOKENIZER_UNK_ID,
    LLM_KV_TOKENIZER_SEP_ID,
    LLM_KV_TOKENIZER_PAD_ID,
    LLM_KV_TOKENIZER_CLS_ID,
    LLM_KV_TOKENIZER_MASK_ID,
    LLM_KV_TOKENIZER_ADD_BOS,
    LLM_KV_TOKENIZER_ADD_EOS,
    LLM_KV_TOKENIZER_ADD_PREFIX,
    LLM_KV_TOKENIZER_HF_JSON,
    LLM_KV_TOKENIZER_RWKV,
    LLM_KV_TOKENIZER_PREFIX_ID,
    LLM_KV_TOKENIZER_SUFFIX_ID,
    LLM_KV_TOKENIZER_MIDDLE_ID,
    LLM_KV_TOKENIZER_EOT_ID,
};
static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
    { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
    { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
    { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
    { LLM_KV_GENERAL_NAME, "general.name" },
    { LLM_KV_GENERAL_AUTHOR, "general.author" },
    { LLM_KV_GENERAL_VERSION, "general.version" },
    { LLM_KV_GENERAL_URL, "general.url" },
    { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
    { LLM_KV_GENERAL_LICENSE, "general.license" },
    { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
    { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },
    { LLM_KV_VOCAB_SIZE, "%s.vocab_size" },
    { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
    { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
    { LLM_KV_BLOCK_COUNT, "%s.block_count" },
    { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
    { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
    { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
    { LLM_KV_EXPERT_COUNT, "%s.expert_count" },
    { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" },
    { LLM_KV_POOLING_TYPE, "%s.pooling_type" },
    { LLM_KV_LOGIT_SCALE, "%s.logit_scale" },
    { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
    { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
    { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
    { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" },
    { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" },
    { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
    { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" },
    { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
    { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
    { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
    { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
    { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
    { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },
    { LLM_KV_SPLIT_NO, "split.no" },
    { LLM_KV_SPLIT_COUNT, "split.count" },
    { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" },
    { LLM_KV_SSM_CONV_KERNEL, "%s.ssm.conv_kernel" },
    { LLM_KV_SSM_INNER_SIZE, "%s.ssm.inner_size" },
    { LLM_KV_SSM_STATE_SIZE, "%s.ssm.state_size" },
    { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
    { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
    { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" },
    { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
    { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
    { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, "tokenizer.ggml.token_type_count" },
    { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
    { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
    { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
    { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
    { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
    { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
    { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
    { LLM_KV_TOKENIZER_CLS_ID, "tokenizer.ggml.cls_token_id" },
    { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" },
    { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
    { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
    { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" },
    { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
    { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
    { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" },
    { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" },
    { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
    { LLM_KV_TOKENIZER_EOT_ID, "tokenizer.ggml.eot_token_id" },
};
struct LLM_KV {
    LLM_KV(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    std::string operator()(llm_kv kv) const {
        return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
    }
};
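// illustrative usage (editorial sketch, not from the upstream source): the
// functor substitutes the architecture name into the "%s" key templates above;
// keys without a "%s" (e.g. the tokenizer keys) pass through unchanged, since
// printf-family functions ignore excess arguments.
//
//   LLM_KV kv(LLM_ARCH_LLAMA);
//   kv(LLM_KV_CONTEXT_LENGTH);   // -> "llama.context_length"
//   kv(LLM_KV_TOKENIZER_BOS_ID); // -> "tokenizer.ggml.bos_token_id"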
enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_TOKEN_EMBD_NORM,
    LLM_TENSOR_TOKEN_TYPES,
    LLM_TENSOR_POS_EMBD,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_ROPE_FREQS,
    LLM_TENSOR_ATTN_Q,
    LLM_TENSOR_ATTN_K,
    LLM_TENSOR_ATTN_V,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_NORM_2,
    LLM_TENSOR_ATTN_OUT_NORM,
    LLM_TENSOR_ATTN_ROT_EMBD,
    LLM_TENSOR_FFN_GATE_INP,
    LLM_TENSOR_FFN_GATE_INP_SHEXP,
    LLM_TENSOR_FFN_NORM,
    LLM_TENSOR_FFN_GATE,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
    LLM_TENSOR_FFN_ACT,
    LLM_TENSOR_FFN_DOWN_EXP,  // split experts for backward compatibility
    LLM_TENSOR_FFN_GATE_EXP,
    LLM_TENSOR_FFN_UP_EXP,
    LLM_TENSOR_FFN_DOWN_EXPS, // merged experts
    LLM_TENSOR_FFN_GATE_EXPS,
    LLM_TENSOR_FFN_UP_EXPS,
    LLM_TENSOR_FFN_DOWN_SHEXP,
    LLM_TENSOR_FFN_GATE_SHEXP,
    LLM_TENSOR_FFN_UP_SHEXP,
    LLM_TENSOR_ATTN_Q_NORM,
    LLM_TENSOR_ATTN_K_NORM,
    LLM_TENSOR_LAYER_OUT_NORM,
    LLM_TENSOR_SSM_IN,
    LLM_TENSOR_SSM_CONV1D,
    LLM_TENSOR_SSM_X,
    LLM_TENSOR_SSM_DT,
    LLM_TENSOR_SSM_A,
    LLM_TENSOR_SSM_D,
    LLM_TENSOR_SSM_OUT,
};
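// note on the name templates below (editorial, illustrative): the legacy
// *_EXP entries carry two "%d" fields (layer index, expert index) because
// each expert used to be stored as a separate tensor, e.g. "blk.%d.ffn_gate.%d",
// while the *_EXPS entries carry a single "%d" (layer index) and name the
// merged expert tensors, e.g. "blk.%d.ffn_gate_exps".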
static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
    {
        LLM_ARCH_LLAMA,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
        },
    },
    {
        LLM_ARCH_BAICHUAN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_FALCON,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_GROK,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
        },
    },
    {
        LLM_ARCH_GPT2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_GPTJ,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
    {
        LLM_ARCH_GPTNEOX,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_MPT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
        },
    },
    {
        LLM_ARCH_STARCODER,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_REFACT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_BERT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_TOKEN_TYPES, "token_types" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_NOMIC_BERT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_TOKEN_TYPES, "token_types" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_JINA_BERT_V2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_TOKEN_TYPES, "token_types" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_BLOOM,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_STABLELM,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
        },
    },
    {
        LLM_ARCH_QWEN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_QWEN2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_QWEN2MOE,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
            { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
            { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
            { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
        },
    },
    {
        LLM_ARCH_PHI2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PHI3,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PLAMO,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_CODESHELL,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_ORION,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_INTERNLM2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_MINICPM,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
        },
    },
    {
        LLM_ARCH_GEMMA,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_STARCODER2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_MAMBA,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
            { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
            { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" },
            { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
            { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
            { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
            { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
        },
    },
    {
        LLM_ARCH_XVERSE,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_COMMAND_R,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
        },
    },
    {
        LLM_ARCH_DBRX,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
        },
    },
    {
        LLM_ARCH_OLMO,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_UNKNOWN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
};
static llm_arch llm_arch_from_string(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
        if (kv.second == name) {
            return kv.first;
        }
    }

    return LLM_ARCH_UNKNOWN;
}
// helper to handle gguf constants
// usage:
//
//   const auto tn = LLM_TN(LLM_ARCH_LLAMA);
//
//   std::string name = tn(LLM_TENSOR_OUTPUT);                 -> "output"
//   std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias");     -> "token_embd.bias"
//   std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
//
struct LLM_TN {
    LLM_TN(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    std::string operator()(llm_tensor tensor) const {
        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
            return "__missing__";
        }
        return LLM_TENSOR_NAMES.at(arch).at(tensor);
    }

    std::string operator()(llm_tensor tensor, const std::string & suffix) const {
        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
            return "__missing__";
        }
        return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
    }

    std::string operator()(llm_tensor tensor, int bid) const {
        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
            return "__missing__";
        }
        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
    }

    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
            return "__missing__";
        }
        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
    }

    std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
            return "__missing__";
        }
        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
    }
};
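
// example sketch for the two-index overload (assuming the per-expert patterns
// in LLM_TENSOR_NAMES above, e.g. "blk.%d.ffn_gate.%d" for LLM_ARCH_MINICPM),
// which expands both the layer id and the expert id:
//
//   const auto tn = LLM_TN(LLM_ARCH_MINICPM);
//   std::string name = tn(LLM_TENSOR_FFN_GATE_EXP, "weight", 3, 1); // -> "blk.3.ffn_gate.1.weight"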
//
// gguf helpers
//

static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
    { LLAMA_ROPE_SCALING_TYPE_NONE, "none" },
    { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" },
    { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" },
};

static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
    for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
        if (kv.second == name) {
            return (llama_rope_scaling_type) kv.first;
        }
    }

    return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
}
static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
    switch (type) {
        case GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
        case GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
        case GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
        case GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
        case GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
        case GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
        case GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
        case GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
        case GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
        case GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
        case GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
        default:                return format("unknown type %d", type);
    }
}
static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
    const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);

    switch (type) {
        case GGUF_TYPE_STRING:
            return gguf_get_val_str(ctx_gguf, i);
        case GGUF_TYPE_ARRAY:
            {
                const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
                int arr_n = gguf_get_arr_n(ctx_gguf, i);
                const void * data = gguf_get_arr_data(ctx_gguf, i);
                std::stringstream ss;
                ss << "[";
                for (int j = 0; j < arr_n; j++) {
                    if (arr_type == GGUF_TYPE_STRING) {
                        std::string val = gguf_get_arr_str(ctx_gguf, i, j);
                        // escape quotes
                        replace_all(val, "\\", "\\\\");
                        replace_all(val, "\"", "\\\"");
                        ss << '"' << val << '"';
                    } else if (arr_type == GGUF_TYPE_ARRAY) {
                        ss << "???";
                    } else {
                        ss << gguf_data_to_str(arr_type, data, j);
                    }
                    if (j < arr_n - 1) {
                        ss << ", ";
                    }
                }
                ss << "]";
                return ss.str();
            }
        default:
            return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
    }
}
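
// example renderings of the serialization above (values are hypothetical):
//   GGUF_TYPE_STRING                     -> hello
//   array of GGUF_TYPE_INT32             -> [1, 2, 3]
//   array of GGUF_TYPE_STRING            -> ["a", "say \"hi\""]
//   array of GGUF_TYPE_ARRAY (2 entries) -> [???, ???] (nested arrays are not expanded)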
//
// llama helpers
//

#if defined(_WIN32)
static std::string llama_format_win_err(DWORD err) {
    LPSTR buf;
    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
    if (!size) {
        return "FormatMessageA failed";
    }
    std::string ret(buf, size);
    LocalFree(buf);
    return ret;
}
#endif
template <typename T>
struct no_init {
    T value;
    no_init() { /* do nothing */ }
};
struct llama_file {
    // use FILE * so we don't have to re-open the file to mmap
    FILE * fp;
    size_t size;

    llama_file(const char * fname, const char * mode) {
        fp = ggml_fopen(fname, mode);
        if (fp == NULL) {
            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
        }
        seek(0, SEEK_END);
        size = tell();
        seek(0, SEEK_SET);
    }

    size_t tell() const {
#ifdef _WIN32
        __int64 ret = _ftelli64(fp);
#else
        long ret = std::ftell(fp);
#endif
        GGML_ASSERT(ret != -1); // this really shouldn't fail
        return (size_t) ret;
    }

    void seek(size_t offset, int whence) const {
#ifdef _WIN32
        int ret = _fseeki64(fp, (__int64) offset, whence);
#else
        int ret = std::fseek(fp, (long) offset, whence);
#endif
        GGML_ASSERT(ret == 0); // same
    }

    void read_raw(void * ptr, size_t len) const {
        if (len == 0) {
            return;
        }
        errno = 0;
        std::size_t ret = std::fread(ptr, len, 1, fp);
        if (ferror(fp)) {
            throw std::runtime_error(format("read error: %s", strerror(errno)));
        }
        if (ret != 1) {
            throw std::runtime_error("unexpectedly reached end of file");
        }
    }

    uint32_t read_u32() const {
        uint32_t ret;
        read_raw(&ret, sizeof(ret));
        return ret;
    }

    void write_raw(const void * ptr, size_t len) const {
        if (len == 0) {
            return;
        }
        errno = 0;
        size_t ret = std::fwrite(ptr, len, 1, fp);
        if (ret != 1) {
            throw std::runtime_error(format("write error: %s", strerror(errno)));
        }
    }

    void write_u32(std::uint32_t val) const {
        write_raw(&val, sizeof(val));
    }

    ~llama_file() {
        if (fp) {
            std::fclose(fp);
        }
    }
};
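
// usage sketch (the file name is hypothetical; GGUF files begin with the
// 4-byte magic "GGUF", so the first u32 read should match it):
//
//   llama_file file("model.gguf", "rb");
//   const uint32_t magic = file.read_u32(); // throws on I/O error or short read
//   // file.size already holds the total size, so no extra stat() call is needed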
using llama_files = std::vector<std::unique_ptr<llama_file>>;

struct llama_mmap {
    void * addr;
    size_t size;

    llama_mmap(const llama_mmap &) = delete;

#ifdef _POSIX_MAPPED_FILES
    static constexpr bool SUPPORTED = true;

    // list of mapped fragments (first_offset, last_offset)
    std::vector<std::pair<size_t, size_t>> mapped_fragments;

    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
        size = file->size;
        int fd = fileno(file->fp);
        int flags = MAP_SHARED;
        // prefetch/readahead impairs performance on NUMA systems
        if (numa) { prefetch = 0; }
#ifdef __linux__
        // advise the kernel to read the file sequentially (increases readahead)
        if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
            LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
                    strerror(errno));
        }
        if (prefetch) { flags |= MAP_POPULATE; }
#endif
        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
        if (addr == MAP_FAILED) { // NOLINT
            throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
        }

        if (prefetch > 0) {
            // advise the kernel to preload the mapped memory
            if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
                LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
                        strerror(errno));
            }
        }
        if (numa) {
            // advise the kernel not to use readahead
            // (because the next page might not belong on the same node)
            if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
                LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
                        strerror(errno));
            }
        }

        // initialize list of mapped_fragments
        mapped_fragments.emplace_back(0, file->size);
    }

    static void align_range(size_t * first, size_t * last, size_t page_size) {
        // align first to the next page
        size_t offset_in_page = *first & (page_size - 1);
        size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
        *first += offset_to_page;

        // align last to the previous page
        *last = *last & ~(page_size - 1);

        if (*last <= *first) {
            *last = *first;
        }
    }
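
    // worked example for align_range (assuming a 4096-byte page size):
    //   first = 100, last = 8200  ->  first = 4096, last = 8192
    // i.e. the range is shrunk inward so that only whole pages are unmapped;
    // a sub-page range collapses to first == last and the caller then has nothing to unmap.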
    // partially unmap the file in the range [first, last)
    void unmap_fragment(size_t first, size_t last) {
        // note: this function must not be called multiple times with overlapping ranges
        // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
        int page_size = sysconf(_SC_PAGESIZE);
        align_range(&first, &last, page_size);
        size_t len = last - first;

        if (len == 0) {
            return;
        }

        GGML_ASSERT(first % page_size == 0);
        GGML_ASSERT(last % page_size == 0);
        GGML_ASSERT(last > first);

        void * next_page_start = (uint8_t *) addr + first;

        // unmap the range
        if (munmap(next_page_start, len)) {
            LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
        }

        // update the list of mapped fragments to avoid unmapping the same range again in the destructor
        std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
        for (const auto & frag : mapped_fragments) {
            if (frag.first < first && frag.second > last) {
                // the range is in the middle of the fragment, split it
                new_mapped_fragments.emplace_back(frag.first, first);
                new_mapped_fragments.emplace_back(last, frag.second);
            } else if (frag.first < first && frag.second > first) {
                // the range starts in the middle of the fragment
                new_mapped_fragments.emplace_back(frag.first, first);
            } else if (frag.first < last && frag.second > last) {
                // the range ends in the middle of the fragment
                new_mapped_fragments.emplace_back(last, frag.second);
            } else if (frag.first >= first && frag.second <= last) {
                // the range covers the entire fragment
            } else {
                // the range is outside the fragment
                new_mapped_fragments.push_back(frag);
            }
        }
        mapped_fragments = std::move(new_mapped_fragments);
    }
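
    // illustrative bookkeeping (offsets hypothetical): starting from the single
    // fragment [0, size), unmap_fragment(4096, 8192) leaves [0, 4096) and
    // [8192, size) in mapped_fragments, so the destructor below only munmaps
    // memory that is actually still mapped and never touches the hole in the middle.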
    ~llama_mmap() {
        for (const auto & frag : mapped_fragments) {
            if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
                LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
            }
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
        GGML_UNUSED(numa);

        size = file->size;

        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));

        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);

        if (hMapping == NULL) {
            DWORD error = GetLastError();
            throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
        }

        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
        DWORD error = GetLastError();
        CloseHandle(hMapping);

        if (addr == NULL) {
            throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
        }

        if (prefetch > 0) {
#if _WIN32_WINNT >= 0x602
            // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
            BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
            HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");

            // may fail on pre-Windows 8 systems
            pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));

            if (pPrefetchVirtualMemory) {
                // advise the kernel to preload the mapped memory
                WIN32_MEMORY_RANGE_ENTRY range;
                range.VirtualAddress = addr;
                range.NumberOfBytes = (SIZE_T) std::min(size, prefetch);
                if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                    LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
                            llama_format_win_err(GetLastError()).c_str());
                }
            }
#else
            throw std::runtime_error("PrefetchVirtualMemory unavailable");
#endif
        }
    }

    void unmap_fragment(size_t first, size_t last) {
        // not supported
        GGML_UNUSED(first);
        GGML_UNUSED(last);
    }

    ~llama_mmap() {
        if (!UnmapViewOfFile(addr)) {
            LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) {
        GGML_UNUSED(file);
        GGML_UNUSED(prefetch);
        GGML_UNUSED(numa);

        throw std::runtime_error("mmap not supported");
    }

    void unmap_fragment(size_t first, size_t last) {
        GGML_UNUSED(first);
        GGML_UNUSED(last);

        throw std::runtime_error("mmap not supported");
    }
#endif
};
using llama_mmaps = std::vector<std::unique_ptr<llama_mmap>>;

// Represents some region of memory being locked using mlock or VirtualLock;
// will automatically unlock on destruction.
struct llama_mlock {
    void * addr = NULL;
    size_t size = 0;

    bool failed_already = false;

    llama_mlock() {}
    llama_mlock(const llama_mlock &) = delete;

    ~llama_mlock() {
        if (size) {
            raw_unlock(addr, size);
        }
    }

    void init(void * ptr) {
        GGML_ASSERT(addr == NULL && size == 0); // NOLINT
        addr = ptr;
    }

    void grow_to(size_t target_size) {
        GGML_ASSERT(addr);
        if (failed_already) {
            return;
        }
        size_t granularity = lock_granularity();
        target_size = (target_size + granularity - 1) & ~(granularity - 1);
        if (target_size > size) {
            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
                size = target_size;
            } else {
                failed_already = true;
            }
        }
    }
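
    // worked example for grow_to (assuming a 4096-byte lock granularity):
    //   grow_to(5000) rounds the target up to 8192; if the current size is 4096,
    //   only the new tail [addr + 4096, addr + 8192) is passed to raw_lock.
    // once raw_lock fails, failed_already suppresses all further attempts.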
#ifdef _POSIX_MEMLOCK_RANGE
    static constexpr bool SUPPORTED = true;

    static size_t lock_granularity() {
        return (size_t) sysconf(_SC_PAGESIZE);
    }

    #ifdef __APPLE__
        #define MLOCK_SUGGESTION \
            "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
            "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n"
    #else
        #define MLOCK_SUGGESTION \
            "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n"
    #endif

    bool raw_lock(const void * addr, size_t size) const {
        if (!mlock(addr, size)) {
            return true;
        }

        char * errmsg = std::strerror(errno);
        bool suggest = (errno == ENOMEM);

        // Check if the resource limit is fine after all
        struct rlimit lock_limit;
        if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
            suggest = false;
        }
        if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
            suggest = false;
        }

        LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
        return false;
    }

    #undef MLOCK_SUGGESTION

    static void raw_unlock(void * addr, size_t size) {
        if (munlock(addr, size)) {
            LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno));
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    static size_t lock_granularity() {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        return (size_t) si.dwPageSize;
    }

    bool raw_lock(void * ptr, size_t len) const {
        for (int tries = 1; ; tries++) {
            if (VirtualLock(ptr, len)) {
                return true;
            }
            if (tries == 2) {
                LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
                        len, size, llama_format_win_err(GetLastError()).c_str());
                return false;
            }

            // It failed but this was only the first try; increase the working
            // set size and try again.
            SIZE_T min_ws_size, max_ws_size;
            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
                LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // Per MSDN: "The maximum number of pages that a process can lock
            // is equal to the number of pages in its minimum working set minus
            // a small overhead."
            // Hopefully a megabyte is enough overhead:
            size_t increment = len + 1048576;
            // The minimum must be <= the maximum, so we need to increase both:
            min_ws_size += increment;
            max_ws_size += increment;
            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
                LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
        }
    }

    static void raw_unlock(void * ptr, size_t len) {
        if (!VirtualUnlock(ptr, len)) {
            LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    static size_t lock_granularity() {
        return (size_t) 65536;
    }

    bool raw_lock(const void * addr, size_t len) const {
        LLAMA_LOG_WARN("warning: mlock not supported on this system\n");
        return false;
    }

    static void raw_unlock(const void * addr, size_t len) {}
#endif
};
using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;

static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
    std::vector<char> result(8, 0);
    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }

    return std::string(result.data(), result.size());
}
static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer) {
    ggml_backend_buffer_type_t buft = nullptr;

#if defined(GGML_USE_CUDA)
    // host buffers should only be used when data is expected to be copied to/from the GPU
    if (host_buffer) {
        buft = ggml_backend_cuda_host_buffer_type();
    }
#elif defined(GGML_USE_SYCL)
    if (host_buffer) {
        buft = ggml_backend_sycl_host_buffer_type();
    }
#elif defined(GGML_USE_CPU_HBM)
    buft = ggml_backend_cpu_hbm_buffer_type();
#elif defined(GGML_USE_VULKAN)
    if (host_buffer) {
        buft = ggml_backend_vk_host_buffer_type();
    }
#endif

    if (buft == nullptr) {
        buft = ggml_backend_cpu_buffer_type();
    }
    return buft;

    GGML_UNUSED(host_buffer);
}
//
// globals
//

struct llama_state {
    llama_state() {
#ifdef GGML_USE_METAL
        ggml_backend_metal_log_set_callback(log_callback, log_callback_user_data);
#elif defined(GGML_USE_CUDA)
        ggml_backend_cuda_log_set_callback(log_callback, log_callback_user_data);
#endif
    }

    // We save the log callback globally
    ggml_log_callback log_callback = llama_log_callback_default;
    void * log_callback_user_data = nullptr;
};

static llama_state g_state;
// available llama models
enum e_model {
    MODEL_UNKNOWN,
    MODEL_17M,
    MODEL_22M,
    MODEL_33M,
    MODEL_109M,
    MODEL_137M,
    MODEL_335M,
    MODEL_0_5B,
    MODEL_1B,
    MODEL_2B,
    MODEL_3B,
    MODEL_4B,
    MODEL_7B,
    MODEL_8B,
    MODEL_12B,
    MODEL_13B,
    MODEL_14B,
    MODEL_15B,
    MODEL_20B,
    MODEL_30B,
    MODEL_34B,
    MODEL_35B,
    MODEL_40B,
    MODEL_65B,
    MODEL_70B,
    MODEL_314B,
    MODEL_SMALL,
    MODEL_MEDIUM,
    MODEL_LARGE,
    MODEL_XL,
    MODEL_A2_7B,
    MODEL_8x7B,
    MODEL_8x22B,
    MODEL_16x12B,
};
static const size_t kiB = 1024;
static const size_t MiB = 1024*kiB;
static const size_t GiB = 1024*MiB;
struct llama_hparams {
    bool vocab_only;
    bool rope_finetuned;

    uint32_t n_vocab;
    uint32_t n_ctx_train; // context size the model was trained on
    uint32_t n_embd;
    uint32_t n_head;
    uint32_t n_head_kv;
    uint32_t n_layer;
    uint32_t n_rot;
    uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
    uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
    uint32_t n_ff;
    uint32_t n_expert = 0;
    uint32_t n_expert_used = 0;
    uint32_t n_vocab_type = 0; // for BERT-style token types

    float f_norm_eps;
    float f_norm_rms_eps;

    float    rope_freq_base_train;
    float    rope_freq_scale_train;
    uint32_t n_yarn_orig_ctx;

    // for State Space Models
    uint32_t ssm_d_conv  = 0;
    uint32_t ssm_d_inner = 0;
    uint32_t ssm_d_state = 0;
    uint32_t ssm_dt_rank = 0;

    float f_clamp_kqv      = 0.0f;
    float f_max_alibi_bias = 0.0f;
    float f_logit_scale    = 0.0f;

    bool causal_attn = true;
    bool use_alibi   = false;

    enum llama_pooling_type      pooling_type            = LLAMA_POOLING_TYPE_NONE;
    enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
    enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;

    bool operator!=(const llama_hparams & other) const {
        if (this->vocab_only    != other.vocab_only)    return true;
        if (this->n_vocab       != other.n_vocab)       return true;
        if (this->n_ctx_train   != other.n_ctx_train)   return true;
        if (this->n_embd        != other.n_embd)        return true;
        if (this->n_head        != other.n_head)        return true;
        if (this->n_head_kv     != other.n_head_kv)     return true;
        if (this->n_layer       != other.n_layer)       return true;
        if (this->n_rot         != other.n_rot)         return true;
        if (this->n_embd_head_k != other.n_embd_head_k) return true;
        if (this->n_embd_head_v != other.n_embd_head_v) return true;
        if (this->n_ff          != other.n_ff)          return true;
        if (this->n_expert      != other.n_expert)      return true;
        if (this->n_expert_used != other.n_expert_used) return true;

        if (this->rope_finetuned  != other.rope_finetuned)  return true;
        if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;

        if (this->ssm_d_conv  != other.ssm_d_conv)  return true;
        if (this->ssm_d_inner != other.ssm_d_inner) return true;
        if (this->ssm_d_state != other.ssm_d_state) return true;
        if (this->ssm_dt_rank != other.ssm_dt_rank) return true;

        const float EPSILON = 1e-9f;

        if (!is_float_close(this->f_norm_eps,            other.f_norm_eps,            EPSILON)) return true;
        if (!is_float_close(this->f_norm_rms_eps,        other.f_norm_rms_eps,        EPSILON)) return true;
        if (!is_float_close(this->rope_freq_base_train,  other.rope_freq_base_train,  EPSILON)) return true;
        if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;

        return false;
    }

    uint32_t n_gqa() const {
        if (n_head_kv == 0) {
            return 0;
        }
        return n_head/n_head_kv;
    }

    uint32_t n_embd_k_gqa() const { // dimension of key embeddings across all k-v heads
        return n_embd_head_k * n_head_kv;
    }

    uint32_t n_embd_v_gqa() const { // dimension of value embeddings across all k-v heads
        return n_embd_head_v * n_head_kv;
    }

    uint32_t n_embd_k_s() const { // dimension of the rolling state embeddings
        // corresponds to Mamba's conv_states size
        // TODO: maybe support other convolution strides than 1
        // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
        return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
    }

    uint32_t n_embd_v_s() const { // dimension of the recurrent state embeddings
        // corresponds to Mamba's ssm_states size
        return ssm_d_state * ssm_d_inner;
    }
};
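
// worked example for the GQA helpers (hypothetical 70B-style shapes:
// n_head = 64, n_head_kv = 8, n_embd_head_k = n_embd_head_v = 128):
//   n_gqa()        = 64/8  = 8    query heads share each k-v head
//   n_embd_k_gqa() = 128*8 = 1024 K elements per token across all k-v heads
//   n_embd_v_gqa() = 128*8 = 1024 V elements per token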
struct llama_cparams {
    uint32_t n_ctx;           // context size used during inference
    uint32_t n_batch;
    uint32_t n_ubatch;
    uint32_t n_seq_max;
    uint32_t n_threads;       // number of threads to use for generation
    uint32_t n_threads_batch; // number of threads to use for batch processing

    float rope_freq_base;
    float rope_freq_scale;

    uint32_t n_yarn_orig_ctx;
    // These hyperparameters are not exposed in GGUF, because all
    // existing YaRN models use the same values for them.
    float yarn_ext_factor;
    float yarn_attn_factor;
    float yarn_beta_fast;
    float yarn_beta_slow;
    float defrag_thold;

    bool embeddings;
    bool causal_attn;
    bool offload_kqv;
    bool flash_attn;

    enum llama_pooling_type pooling_type;

    ggml_backend_sched_eval_callback cb_eval;
    void * cb_eval_user_data;
};
struct llama_layer {
    // normalization
    struct ggml_tensor * attn_norm;
    struct ggml_tensor * attn_norm_b;
    struct ggml_tensor * attn_norm_2;
    struct ggml_tensor * attn_norm_2_b;
    struct ggml_tensor * attn_q_norm;
    struct ggml_tensor * attn_q_norm_b;
    struct ggml_tensor * attn_k_norm;
    struct ggml_tensor * attn_k_norm_b;
    struct ggml_tensor * attn_out_norm;
    struct ggml_tensor * attn_out_norm_b;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;
    struct ggml_tensor * wqkv;

    // attention bias
    struct ggml_tensor * bq;
    struct ggml_tensor * bk;
    struct ggml_tensor * bv;
    struct ggml_tensor * bo;
    struct ggml_tensor * bqkv;

    // normalization
    struct ggml_tensor * ffn_norm;
    struct ggml_tensor * ffn_norm_b;
    struct ggml_tensor * layer_out_norm;
    struct ggml_tensor * layer_out_norm_b;

    // ff
    struct ggml_tensor * ffn_gate; // w1
    struct ggml_tensor * ffn_down; // w2
    struct ggml_tensor * ffn_up;   // w3

    // ff MoE
    struct ggml_tensor * ffn_gate_inp;
    struct ggml_tensor * ffn_gate_exps;
    struct ggml_tensor * ffn_down_exps;
    struct ggml_tensor * ffn_up_exps;

    // ff shared expert (shexp)
    struct ggml_tensor * ffn_gate_inp_shexp;
    struct ggml_tensor * ffn_gate_shexp;
    struct ggml_tensor * ffn_down_shexp;
    struct ggml_tensor * ffn_up_shexp;

    // ff bias
    struct ggml_tensor * ffn_down_b; // b2
    struct ggml_tensor * ffn_up_b;   // b3
    struct ggml_tensor * ffn_act;

    // mamba proj
    struct ggml_tensor * ssm_in;
    struct ggml_tensor * ssm_x;
    struct ggml_tensor * ssm_dt;
    struct ggml_tensor * ssm_out;

    // mamba
    struct ggml_tensor * ssm_conv1d;
    struct ggml_tensor * ssm_a;
    struct ggml_tensor * ssm_d;

    // mamba bias
    struct ggml_tensor * ssm_conv1d_b;
    struct ggml_tensor * ssm_dt_b;
};
struct llama_kv_cell {
    llama_pos pos   = -1;
    llama_pos delta = 0;
    int32_t   src   = 0; // used by recurrent state models to copy states

    std::set<llama_seq_id> seq_id;

    bool has_seq_id(const llama_seq_id & id) const {
        return seq_id.find(id) != seq_id.end();
    }

    bool is_empty() const {
        return seq_id.empty();
    }

    bool is_same_seq(const llama_kv_cell & other) const {
        return seq_id == other.seq_id;
    }
};
// ring-buffer of cached KV data
struct llama_kv_cache {
    bool has_shift = false;
    bool do_defrag = false;
    bool do_copy   = false;
    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
    bool v_trans   = true;  // the value tensor is transposed

    // Note: The value of head isn't only used to optimize searching
    // for a free KV slot. llama_decode_internal also uses it, so it
    // cannot be freely changed after a slot has been allocated.
    uint32_t head = 0;
    uint32_t size = 0;
    uint32_t used = 0; // used cells (i.e. at least one seq_id)

    // computed before each graph build
    uint32_t n = 0;

    ggml_type type_k = GGML_TYPE_F16;
    ggml_type type_v = GGML_TYPE_F16;

    std::vector<llama_kv_cell> cells;

    std::vector<struct ggml_tensor *> k_l; // per layer
    std::vector<struct ggml_tensor *> v_l;

    std::vector<struct ggml_context *> ctxs;
    std::vector<ggml_backend_buffer_t> bufs;

    size_t total_size() const {
        size_t size = 0;
        for (ggml_backend_buffer_t buf : bufs) {
            size += ggml_backend_buffer_get_size(buf);
        }
        return size;
    }

    ~llama_kv_cache() {
        for (struct ggml_context * ctx : ctxs) {
            ggml_free(ctx);
        }
        for (ggml_backend_buffer_t buf : bufs) {
            ggml_backend_buffer_free(buf);
        }
    }
};
struct llama_control_vector {
    std::vector<struct ggml_tensor *> tensors; // per layer
    std::vector<struct ggml_context *> ctxs;
    std::vector<ggml_backend_buffer_t> bufs;

    int32_t layer_start = -1;
    int32_t layer_end   = -1;

    ggml_tensor * tensor_for(int il) const {
        if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
            return nullptr;
        }
        return tensors[il];
    }

    ~llama_control_vector() {
        for (struct ggml_context * ctx : ctxs) {
            ggml_free(ctx);
        }
        for (ggml_backend_buffer_t buf : bufs) {
            ggml_backend_buffer_free(buf);
        }
    }
};
struct llama_vocab {
    using id    = int32_t;
    using token = std::string;
    using ttype = llama_token_type;

    struct token_data {
        token text;
        float score;
        ttype type;
    };

    enum llama_vocab_type     type     = LLAMA_VOCAB_TYPE_SPM;
    enum llama_vocab_pre_type type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;

    std::unordered_map<token, id> token_to_id;
    std::vector<token_data>       id_to_token;

    std::unordered_map<token, id> special_tokens_cache;

    std::map<std::pair<std::string, std::string>, int> bpe_ranks;

    // default LLaMA special tokens
    id special_bos_id  = 1;
    id special_eos_id  = 2;
    id special_unk_id  = 0;
    id special_sep_id  = -1;
    id special_pad_id  = -1;
    id special_cls_id  = -1;
    id special_mask_id = -1;

    int special_add_bos = -1; // -1 unknown, 1 add, 0 don't add.
    int special_add_eos = -1; // -1 unknown, 1 add, 0 don't add.

    id linefeed_id       = 13;
    id special_prefix_id = -1;
    id special_suffix_id = -1;
    id special_middle_id = -1;
    id special_eot_id    = -1; // TODO: move above after "eos_id", and here add "file separator" token

    bool add_space_prefix = true;

    int find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
        GGML_ASSERT(token_left.find(' ')  == std::string::npos);
        GGML_ASSERT(token_left.find('\n') == std::string::npos);
        GGML_ASSERT(token_right.find(' ')  == std::string::npos);
        GGML_ASSERT(token_right.find('\n') == std::string::npos);

        auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
        if (it == bpe_ranks.end()) {
            return -1;
        }

        return it->second;
    }
};
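
// usage sketch for find_bpe_rank (token strings and ranks are hypothetical):
// bpe_ranks maps a merge pair to its priority - a lower rank means the pair was
// merged earlier during BPE training - and -1 signals "not a learned merge":
//
//   int r0 = vocab.find_bpe_rank("th", "e");  // e.g. 0 for a very frequent merge
//   int r1 = vocab.find_bpe_rank("zz", "qx"); // -1: this pair was never merged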
struct llama_model {
    e_model     type  = MODEL_UNKNOWN;
    llm_arch    arch  = LLM_ARCH_UNKNOWN;
    llama_ftype ftype = LLAMA_FTYPE_ALL_F32;

    std::string name = "n/a";

    llama_hparams hparams = {};
    llama_vocab   vocab;

    struct ggml_tensor * tok_embd;
    struct ggml_tensor * type_embd;
    struct ggml_tensor * pos_embd;
    struct ggml_tensor * tok_norm;
    struct ggml_tensor * tok_norm_b;

    struct ggml_tensor * output_norm;
    struct ggml_tensor * output_norm_b;
    struct ggml_tensor * output;
    struct ggml_tensor * output_b;

    std::vector<llama_layer> layers;

    llama_split_mode split_mode;
    int main_gpu;
    int n_gpu_layers;

    std::vector<std::string> rpc_servers;

    // gguf metadata
    std::unordered_map<std::string, std::string> gguf_kv;

    // layer -> buffer type mapping
    struct layer_buft {
        layer_buft() : buft_matrix(nullptr), buft(nullptr) {}
        layer_buft(ggml_backend_buffer_type_t matrix) : buft_matrix(matrix), buft(matrix) {}
        layer_buft(ggml_backend_buffer_type_t matrix, ggml_backend_buffer_type_t other) : buft_matrix(matrix), buft(other) {}

        ggml_backend_buffer_type_t buft_matrix; // matrices only - used by split buffers and backends that support only matrix multiplication
        ggml_backend_buffer_type_t buft;        // everything else
    };

    layer_buft buft_input;
    layer_buft buft_output;
    std::vector<layer_buft> buft_layer;

    // contexts where the model tensors metadata is stored
    std::vector<struct ggml_context *> ctxs;

    // the model memory buffers for the tensor data
    std::vector<ggml_backend_buffer_t> bufs;

    // model memory mapped files
    llama_mmaps mappings;

    // objects representing data potentially being locked in memory
    llama_mlocks mlock_bufs;
    llama_mlocks mlock_mmaps;

    // for quantize-stats only
    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;

    int64_t t_load_us  = 0;
    int64_t t_start_us = 0;

    ~llama_model() {
        for (struct ggml_context * ctx : ctxs) {
            ggml_free(ctx);
        }
        for (ggml_backend_buffer_t buf : bufs) {
#ifdef GGML_USE_CUDA
            if (ggml_backend_buffer_get_type(buf) == ggml_backend_cpu_buffer_type()) {
                ggml_backend_cuda_unregister_host_buffer(ggml_backend_buffer_get_base(buf));
            }
#endif
            ggml_backend_buffer_free(buf);
        }
    }
};
struct llama_context {
    llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
    ~llama_context() {
        ggml_backend_sched_free(sched);

        for (ggml_backend_t backend : backends) {
            ggml_backend_free(backend);
        }

        ggml_backend_buffer_free(buf_output);
    }

    llama_cparams cparams;

    std::vector<ggml_backend_t> backends;
#ifdef GGML_USE_METAL
    ggml_backend_t backend_metal = nullptr;
#endif
    ggml_backend_t backend_cpu = nullptr;

    const llama_model & model;

    // key + value cache for the self attention
    struct llama_kv_cache kv_self;

    std::mt19937 rng;

    bool has_evaluated_once = false;

    int64_t t_start_us;
    int64_t t_load_us;
    int64_t t_sample_us = 0;
    int64_t t_p_eval_us = 0;
    int64_t t_eval_us   = 0;

    int64_t t_compute_start_us = 0;
    int64_t n_queued_tokens    = 0;

    int32_t n_sample = 0; // number of tokens sampled
    int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    int32_t n_eval   = 0; // number of eval calls

    // host buffer for the model output (logits and embeddings)
    ggml_backend_buffer_t buf_output = nullptr;

    // decode output (2-dimensional array: [n_outputs][n_vocab])
    size_t  logits_size = 0; // capacity (of floats) for logits
    float * logits      = nullptr;

    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
    size_t  output_size = 0; // capacity (of token positions) for the output buffers
    int32_t n_outputs   = 0; // number of actually-used outputs in the current ubatch or last logical batch

    bool logits_all = false;

    // embeddings output (2-dimensional array: [n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
    size_t  embd_size = 0; // capacity (of floats) for embeddings
    float * embd      = nullptr;

    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
    std::map<llama_seq_id, std::vector<float>> embd_seq;

    // memory buffers used to evaluate the model
    std::vector<uint8_t> buf_compute_meta;
    ggml_backend_sched_t sched = nullptr;

    ggml_abort_callback abort_callback      = nullptr;
    void *              abort_callback_data = nullptr;

    // input tensors
    struct ggml_tensor * inp_tokens;  // I32 [n_batch]
    struct ggml_tensor * inp_embd;    // F32 [n_embd, n_batch]
    struct ggml_tensor * inp_pos;     // I32 [n_batch]
    struct ggml_tensor * inp_out_ids; // I32 [n_outputs]
    struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
    struct ggml_tensor * inp_K_shift; // I32 [kv_size]
    struct ggml_tensor * inp_mean;    // F32 [n_batch, n_batch]
    struct ggml_tensor * inp_cls;     // I32 [n_batch]
    struct ggml_tensor * inp_s_copy;  // I32 [kv_size]
    struct ggml_tensor * inp_s_mask;  // F32 [1, n_kv]
    struct ggml_tensor * inp_s_seq;   // I32 [n_kv, n_batch]

    // control vectors
    struct llama_control_vector cvec;
};
static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_model & model, int gpu) {
    ggml_backend_buffer_type_t buft = nullptr;

#ifdef GGML_USE_RPC
    std::string endpoint = model.rpc_servers[gpu];
    buft = ggml_backend_rpc_buffer_type(endpoint.c_str());
#elif defined(GGML_USE_METAL)
    buft = ggml_backend_metal_buffer_type();
#elif defined(GGML_USE_CUDA)
    buft = ggml_backend_cuda_buffer_type(gpu);
#elif defined(GGML_USE_VULKAN)
    buft = ggml_backend_vk_buffer_type(gpu);
#elif defined(GGML_USE_SYCL)
    buft = ggml_backend_sycl_buffer_type(gpu);
#elif defined(GGML_USE_CLBLAST)
    buft = ggml_backend_opencl_buffer_type();
#elif defined(GGML_USE_KOMPUTE)
    buft = ggml_backend_kompute_buffer_type(gpu);
    if (buft == nullptr) {
        LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, gpu);
    }
#endif

    if (buft == nullptr) {
        buft = llama_default_buffer_type_cpu(true);
    }
    return buft;

    GGML_UNUSED(model);
    GGML_UNUSED(gpu);
}
static ggml_backend_buffer_type_t llama_default_buffer_type_split(const llama_model & model, int fallback_gpu, const float * tensor_split) {
    ggml_backend_buffer_type_t buft = nullptr;

#ifdef GGML_USE_CUDA
    if (ggml_backend_cuda_get_device_count() > 1) {
        buft = ggml_backend_cuda_split_buffer_type(tensor_split);
    }
#endif

#ifdef GGML_USE_SYCL
    if (ggml_backend_sycl_get_device_count() > 1) {
        buft = ggml_backend_sycl_split_buffer_type(tensor_split);
    }
#endif

    if (buft == nullptr) {
        buft = llama_default_buffer_type_offload(model, fallback_gpu);
    }
    return buft;

    GGML_UNUSED(tensor_split);
}
static size_t llama_get_device_count(const llama_model & model) {
#if defined(GGML_USE_RPC)
    return model.rpc_servers.size();
#elif defined(GGML_USE_CUDA)
    return ggml_backend_cuda_get_device_count();
#elif defined(GGML_USE_SYCL)
    return ggml_backend_sycl_get_device_count();
#elif defined(GGML_USE_VULKAN)
    return ggml_backend_vk_get_device_count();
#else
    return 1;
#endif
    GGML_UNUSED(model);
}
static size_t llama_get_device_memory(const llama_model & model, int device) {
#if defined(GGML_USE_RPC)
    size_t total;
    size_t free;
    std::string endpoint = model.rpc_servers[device];
    ggml_backend_rpc_get_device_memory(endpoint.c_str(), &free, &total);
    return free;
#elif defined(GGML_USE_CUDA)
    size_t total;
    size_t free;
    ggml_backend_cuda_get_device_memory(device, &free, &total);
    return free;
#elif defined(GGML_USE_SYCL)
    size_t total;
    size_t free;
    ggml_backend_sycl_get_device_memory(device, &free, &total);
    return free;
#elif defined(GGML_USE_VULKAN)
    size_t total;
    size_t free;
    ggml_backend_vk_get_device_memory(device, &free, &total);
    return free;
#else
    return 1;
#endif
    GGML_UNUSED(model);
    GGML_UNUSED(device);
}
//
// kv cache helpers
//

static bool llama_kv_cache_init(
        struct llama_kv_cache & cache,
        const llama_context * ctx,
        ggml_type type_k,
        ggml_type type_v,
        uint32_t kv_size,
        bool offload) {
    const llama_model & model = ctx->model;
    const llama_cparams & cparams = ctx->cparams;

    const struct llama_hparams & hparams = model.hparams;

    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();
    const int64_t  n_layer      = hparams.n_layer;

    cache.has_shift = false;

    // TODO: find a nicer way to add other recurrent model architectures
    cache.recurrent = model.arch == LLM_ARCH_MAMBA;
    cache.v_trans   = !cparams.flash_attn;

    // TODO: support mixed recurrent Transformer architectures
    // NOTE: (!a || b) is a logical implication (a -> b)
    GGML_ASSERT(!cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_s());
    GGML_ASSERT(!cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_s());
    GGML_ASSERT( cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_gqa());
    GGML_ASSERT( cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_gqa());

    cache.head = 0;
    cache.size = kv_size;
    cache.used = 0;

    cache.type_k = type_k;
    cache.type_v = type_v;

    cache.cells.clear();
    cache.cells.resize(kv_size);

    if (cache.recurrent) {
        // init state copy sources
        for (uint32_t i = 0; i < cache.size; ++i) {
            cache.cells[i].src = i;
        }
    }

#ifdef GGML_USE_CLBLAST
    offload = false;
#endif

    // count used buffer types
    std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
    if (offload) {
        for (int64_t i = 0; i < n_layer; ++i) {
            buft_layer_count[model.buft_layer[i].buft]++;
        }
    } else {
        buft_layer_count[llama_default_buffer_type_cpu(true)] = n_layer;
    }

    // create a context for each buffer type
    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
    for (auto & it : buft_layer_count) {
        int n_layers = it.second;
        struct ggml_init_params params = {
            /*.mem_size   =*/ 2u*n_layers*ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ggml_context * ctx = ggml_init(params);
        if (!ctx) {
            LLAMA_LOG_ERROR("%s: failed to allocate context for kv cache\n", __func__);
            return false;
        }
        ctx_map[it.first] = ctx;
        cache.ctxs.push_back(ctx);
    }

    cache.k_l.reserve(n_layer);
    cache.v_l.reserve(n_layer);

    for (int i = 0; i < (int) n_layer; i++) {
        struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
        ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
        ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
        ggml_format_name(k, "cache_k_l%d", i);
        ggml_format_name(v, "cache_v_l%d", i);
        cache.k_l.push_back(k);
        cache.v_l.push_back(v);
    }

    // allocate tensors and initialize the buffers to avoid NaNs in the padding
    for (auto it : ctx_map) {
        ggml_backend_buffer_type_t buft = it.first;
        ggml_context * ctx = it.second;
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
        if (!buf) {
            LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__);
            return false;
        }
        ggml_backend_buffer_clear(buf, 0);
        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
        cache.bufs.push_back(buf);
    }

    return true;
}
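
// back-of-the-envelope size check (hypothetical shapes: n_embd_k_gqa =
// n_embd_v_gqa = 1024, kv_size = 4096, F16 cells at 2 bytes each):
//   per layer: 1024 * 4096 * 2 bytes = 8 MiB for K, plus the same for V
// so a 32-layer model would log 32 * 16 MiB = 512 MiB of KV buffers above.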
// find an empty slot of size "n_tokens" in the cache
// updates the cache head
// Note: On success, it's important that cache.head points
// to the first cell of the slot.
static bool llama_kv_cache_find_slot(
        struct llama_kv_cache & cache,
        const struct llama_batch & batch) {
    const uint32_t n_ctx    = cache.size;
    const uint32_t n_tokens = batch.n_tokens;

    if (cache.recurrent) {
        // For recurrent state architectures (like Mamba),
        // each KV cache cell can store the state for a whole sequence.

        llama_seq_id min = cache.size - 1;
        llama_seq_id max = 0;

        for (uint32_t i = 0; i < n_tokens; ++i) {
            for (int32_t j = 0; j < batch.n_seq_id[i]; ++j) {
                llama_seq_id seq_id = batch.seq_id[i][j];
                // make sure it's a valid seq_id
                if ((uint32_t) seq_id < cache.size) {
                    if (seq_id > max) {
                        max = seq_id;
                    }
                    if (seq_id < min) {
                        min = seq_id;
                    }
                    // Assuming the tokens are in-order
                    if (batch.pos[i] != cache.cells[seq_id].pos + 1) {
                        // What should happen when the pos backtracks or skips a value?
                        // Clearing the state mid-batch would require special-casing which isn't done.
                        LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d\n",
                                __func__, batch.pos[i], cache.cells[seq_id].pos, seq_id);
                    }
                    if (cache.cells[seq_id].pos < 0 && 0 <= batch.pos[i]) {
                        cache.used += 1;
                    }
                    cache.cells[seq_id].pos = batch.pos[i];
                    // NOTE: seq_ids are not inserted here; they are handled when the input tensors are set
                } else {
                    // too big seq_id
                    // TODO: would it be possible to resize the KV cache size instead?
                    LLAMA_LOG_ERROR("%s: seq_id=%d >= kv_size=%d, try using a bigger --parallel value\n", __func__, seq_id, cache.size);
                    return false;
                }
            }
        }

        // allow getting the range of used cells, from head to head + n
        cache.head = min;
        cache.n    = max - min + 1;

        // sanity check
        return max >= min;
    }
    // otherwise, one cell per token.

    if (n_tokens > n_ctx) {
        LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
        return false;
    }

    uint32_t n_tested = 0;

    while (true) {
        if (cache.head + n_tokens > n_ctx) {
            n_tested += n_ctx - cache.head;
            cache.head = 0;
            continue;
        }

        bool found = true;
        for (uint32_t i = 0; i < n_tokens; i++) {
            if (cache.cells[cache.head + i].pos >= 0) {
                found = false;
                cache.head += i + 1;
                n_tested   += i + 1;
                break;
            }
        }

        if (found) {
            break;
        }

        if (n_tested >= n_ctx) {
            //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
            return false;
        }
    }

    for (uint32_t i = 0; i < n_tokens; i++) {
        cache.cells[cache.head + i].pos = batch.pos[i];

        for (int32_t j = 0; j < batch.n_seq_id[i]; j++) {
            cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i][j]);
        }
    }

    cache.used += n_tokens;

    return true;
}
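
// search sketch (hypothetical 8-cell cache, x = occupied, . = free):
//   cells = [x x . . x . . .], n_tokens = 3, head = 0
// the scan bumps head past each occupied cell, finds the free run at index 5
// and stops there; if head + n_tokens would overflow n_ctx the search wraps to 0,
// and once n_tested reaches n_ctx without a fit the function returns false.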
// find how many cells are currently in use
static uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
    for (uint32_t i = cache.size; i > 0; --i) {
        const llama_kv_cell & cell = cache.cells[i - 1];

        if (cell.pos >= 0 && !cell.is_empty()) {
            return i;
        }
    }

    return 0;
}
static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
    for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
        cache.cells[i].pos = -1;
        cache.cells[i].seq_id.clear();
    }
    cache.head = 0;
    cache.used = 0;

    for (auto & buf : cache.bufs) {
        ggml_backend_buffer_clear(buf, 0);
    }
}
static bool llama_kv_cache_seq_rm(
        struct llama_kv_cache & cache,
        llama_seq_id seq_id,
        llama_pos p0,
        llama_pos p1) {
    uint32_t new_head = cache.size;

    if (p0 < 0) p0 = 0;
    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

    // models like Mamba can't have a state partially erased
    if (cache.recurrent) {
        if (seq_id >= (int64_t) cache.size) {
            // could be fatal
            return false;
        }
        if (0 <= seq_id) {
            // partial intersection is invalid
            if ((0 < p0 && p0 <= cache.cells[seq_id].pos) || (0 < p1 && p1 <= cache.cells[seq_id].pos)) {
                return false;
            }
        } else {
            // when seq_id is negative, the range should include everything or nothing
            if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
                return false;
            }
        }
    }

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
            if (seq_id < 0) {
                cache.cells[i].seq_id.clear();
            } else if (cache.cells[i].has_seq_id(seq_id)) {
                cache.cells[i].seq_id.erase(seq_id);
            } else {
                continue;
            }
            if (cache.cells[i].is_empty()) {
                // keep count of the number of used cells
                if (cache.cells[i].pos >= 0) cache.used--;

                cache.cells[i].pos = -1;
                if (new_head == cache.size) new_head = i;
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != cache.size && new_head < cache.head) cache.head = new_head;

    return true;
}
static void llama_kv_cache_seq_cp(
        struct llama_kv_cache & cache,
        llama_seq_id seq_id_src,
        llama_seq_id seq_id_dst,
        llama_pos p0,
        llama_pos p1) {
    if (p0 < 0) p0 = 0;
    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

    if (cache.recurrent) {
        if ((uint32_t) seq_id_dst < cache.size && (uint32_t) seq_id_src < cache.size) {
            seq_id_src = cache.cells[seq_id_src].src;
            GGML_ASSERT((uint32_t) seq_id_src < cache.size);
            // intent to "copy from"
            // supports copy chains thanks to taking the source of the source
            cache.cells[seq_id_dst].src = seq_id_src;

            // preserve the "keep or clear" status of the copied sequence
            if (cache.cells[seq_id_src].has_seq_id(seq_id_src)) {
                cache.cells[seq_id_dst].seq_id.insert(seq_id_dst);
            } else {
                cache.cells[seq_id_dst].seq_id.erase(seq_id_dst);
            }

            cache.do_copy = true;

            cache.cells[seq_id_dst].pos = cache.cells[seq_id_src].pos;
        }
        return;
    }
    // otherwise, this is the KV cache of a Transformer-like model

    cache.head = 0;

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
            cache.cells[i].seq_id.insert(seq_id_dst);
        }
    }
}

static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
    uint32_t new_head = cache.size;

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (!cache.cells[i].has_seq_id(seq_id)) {
            if (cache.cells[i].pos >= 0) cache.used--;
            cache.cells[i].pos = -1;
            cache.cells[i].seq_id.clear();
            if (new_head == cache.size) new_head = i;
        } else {
            cache.cells[i].seq_id.clear();
            cache.cells[i].seq_id.insert(seq_id);
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
}

static void llama_kv_cache_seq_add(
        struct llama_kv_cache & cache,
                 llama_seq_id   seq_id,
                    llama_pos   p0,
                    llama_pos   p1,
                    llama_pos   delta) {
    uint32_t new_head = cache.size;

    if (p0 < 0) p0 = 0;
    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

    if (cache.recurrent) {
        // for Mamba-like models, only the pos needs to be shifted
        if (0 <= seq_id && seq_id < (int64_t) cache.size) {
            llama_kv_cell & cell = cache.cells[seq_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos += delta;
            }
        }
        return;
    }

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
            cache.has_shift = true;
            cache.cells[i].pos   += delta;
            cache.cells[i].delta += delta;

            if (cache.cells[i].pos < 0) {
                if (!cache.cells[i].is_empty()) {
                    cache.used--;
                }
                cache.cells[i].pos = -1;
                cache.cells[i].seq_id.clear();
                if (new_head == cache.size) {
                    new_head = i;
                }
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    // Otherwise we just start the next search from the beginning.
    cache.head = new_head != cache.size ? new_head : 0;
}
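
// Sketch of the usual "context shift" built on these primitives (hypothetical
// n_keep/n_discard, non-recurrent cache): remove the oldest n_discard
// positions of a sequence and slide the rest back so decoding can continue:
//
//     llama_kv_cache_seq_rm (kv_self, 0, n_keep,             n_keep + n_discard);
//     llama_kv_cache_seq_add(kv_self, 0, n_keep + n_discard, -1,    -n_discard);
//
// The per-cell `delta` accumulated above is what the deferred K-shift applies
// once cache.has_shift is observed.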

static void llama_kv_cache_seq_div(
        struct llama_kv_cache & cache,
                 llama_seq_id   seq_id,
                    llama_pos   p0,
                    llama_pos   p1,
                          int   d) {
    if (p0 < 0) p0 = 0;
    if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();

    if (cache.recurrent) {
        // for Mamba-like models, only the pos needs to be changed
        if (0 <= seq_id && seq_id < (int64_t) cache.size) {
            llama_kv_cell & cell = cache.cells[seq_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos /= d;
            }
        }
        return;
    }

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
            cache.has_shift = true;

            {
                llama_pos p_old = cache.cells[i].pos;
                cache.cells[i].pos   /= d;
                cache.cells[i].delta += cache.cells[i].pos - p_old;
            }
        }
    }
}
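
// Division is typically combined with the addition above to implement
// grouped self-extend; a rough sketch with a hypothetical grouping factor g
// over the window [p0, p1):
//
//     llama_kv_cache_seq_div(kv_self, 0, p0, p1, g);
//
// Since the integer division truncates, up to g neighboring positions
// collapse onto the same value, and `delta` records the net shift per cell.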

static llama_pos llama_kv_cache_seq_pos_max(struct llama_kv_cache & cache, llama_seq_id seq_id) {
    llama_pos result = 0;

    for (uint32_t i = 0; i < cache.size; ++i) {
        if (cache.cells[i].has_seq_id(seq_id)) {
            result = std::max(result, cache.cells[i].pos);
        }
    }

    return result;
}

static void llama_kv_cache_defrag(struct llama_kv_cache & cache) {
    cache.do_defrag = true;
}

static uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) {
    // the FA kernels require padding to avoid extra runtime boundary checks
    return cparams.flash_attn ? 256u : 32u;
}
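
// Example: callers are expected to round the cache size up to this padding,
// e.g. with a sketch like
//
//     kv_size = GGML_PAD(n_ctx, llama_kv_cache_get_padding(cparams));
//
// so n_ctx = 4000 becomes 4096 with flash attention (padding 256) and stays
// 4000 otherwise (already a multiple of 32).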

//
// model loading and saving
//

enum llama_fver {
    GGUF_FILE_VERSION_V1 = 1,
    GGUF_FILE_VERSION_V2 = 2,
    GGUF_FILE_VERSION_V3 = 3,
};

static const char * llama_file_version_name(llama_fver version) {
    switch (version) {
        case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
        case GGUF_FILE_VERSION_V2: return "GGUF V2";
        case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
    }

    return "unknown";
}

static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
    for (size_t i = 1; i < ne.size(); i++) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
    }
    return buf;
}

static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
    for (int i = 1; i < GGML_MAX_DIMS; i++) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
    }
    return buf;
}

namespace GGUFMeta {
    template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
    struct GKV_Base_Type {
        static constexpr gguf_type gt = gt_;

        static T getter(const gguf_context * ctx, const int kid) {
            return gfun(ctx, kid);
        }
    };

    template<typename T> struct GKV_Base;

    template<> struct GKV_Base<bool        >: GKV_Base_Type<bool,         GGUF_TYPE_BOOL,    gguf_get_val_bool> {};
    template<> struct GKV_Base<uint8_t     >: GKV_Base_Type<uint8_t,      GGUF_TYPE_UINT8,   gguf_get_val_u8  > {};
    template<> struct GKV_Base<uint16_t    >: GKV_Base_Type<uint16_t,     GGUF_TYPE_UINT16,  gguf_get_val_u16 > {};
    template<> struct GKV_Base<uint32_t    >: GKV_Base_Type<uint32_t,     GGUF_TYPE_UINT32,  gguf_get_val_u32 > {};
    template<> struct GKV_Base<uint64_t    >: GKV_Base_Type<uint64_t,     GGUF_TYPE_UINT64,  gguf_get_val_u64 > {};
    template<> struct GKV_Base<int8_t      >: GKV_Base_Type<int8_t,       GGUF_TYPE_INT8,    gguf_get_val_i8  > {};
    template<> struct GKV_Base<int16_t     >: GKV_Base_Type<int16_t,      GGUF_TYPE_INT16,   gguf_get_val_i16 > {};
    template<> struct GKV_Base<int32_t     >: GKV_Base_Type<int32_t,      GGUF_TYPE_INT32,   gguf_get_val_i32 > {};
    template<> struct GKV_Base<int64_t     >: GKV_Base_Type<int64_t,      GGUF_TYPE_INT64,   gguf_get_val_i64 > {};
    template<> struct GKV_Base<float       >: GKV_Base_Type<float,        GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
    template<> struct GKV_Base<double      >: GKV_Base_Type<double,       GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
    template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING,  gguf_get_val_str > {};

    template<> struct GKV_Base<std::string> {
        static constexpr gguf_type gt = GGUF_TYPE_STRING;

        static std::string getter(const gguf_context * ctx, const int kid) {
            return gguf_get_val_str(ctx, kid);
        }
    };

    struct ArrayInfo {
        const gguf_type gt;
        const size_t length;
        const void * data;
    };

    template<> struct GKV_Base<ArrayInfo> {
    public:
        static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
        static ArrayInfo getter(const gguf_context * ctx, const int k) {
            return ArrayInfo {
                gguf_get_arr_type(ctx, k),
                size_t(gguf_get_arr_n(ctx, k)),
                gguf_get_arr_data(ctx, k),
            };
        }
    };

    template<typename T>
    class GKV : public GKV_Base<T> {
        GKV() = delete;

    public:
        static T get_kv(const gguf_context * ctx, const int k) {
            const enum gguf_type kt = gguf_get_kv_type(ctx, k);

            if (kt != GKV::gt) {
                throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
                    gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
            }
            return GKV::getter(ctx, k);
        }

        static const char * override_type_to_str(const llama_model_kv_override_type ty) {
            switch (ty) {
                case LLAMA_KV_OVERRIDE_TYPE_BOOL:  return "bool";
                case LLAMA_KV_OVERRIDE_TYPE_INT:   return "int";
                case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
                case LLAMA_KV_OVERRIDE_TYPE_STR:   return "str";
            }

            return "unknown";
        }

        static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) {
            if (!ovrd) { return false; }
            if (ovrd->tag == expected_type) {
                LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
                    __func__, override_type_to_str(ovrd->tag), ovrd->key);
                switch (ovrd->tag) {
                    case LLAMA_KV_OVERRIDE_TYPE_BOOL:  {
                        LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
                    } break;
                    case LLAMA_KV_OVERRIDE_TYPE_INT:   {
                        LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
                    } break;
                    case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
                        LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
                    } break;
                    case LLAMA_KV_OVERRIDE_TYPE_STR:   {
                        LLAMA_LOG_INFO("%s\n", ovrd->val_str);
                    } break;
                    default:
                        // Shouldn't be possible to end up here, but just in case...
                        throw std::runtime_error(
                            format("Unsupported attempt to override %s type for metadata key %s\n",
                                override_type_to_str(ovrd->tag), ovrd->key));
                }
                return true;
            }
            LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
                __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag));
            return false;
        }

        template<typename OT>
        static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
                target = ovrd->val_bool;
                return true;
            }
            return false;
        }

        template<typename OT>
        static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
                target = ovrd->val_i64;
                return true;
            }
            return false;
        }

        template<typename OT>
        static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
                target = ovrd->val_f64;
                return true;
            }
            return false;
        }

        template<typename OT>
        static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
                target = ovrd->val_str;
                return true;
            }
            return false;
        }

        static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
            if (try_override<T>(target, ovrd)) {
                return true;
            }
            if (k < 0) { return false; }
            target = get_kv(ctx, k);
            return true;
        }

        static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
            return set(ctx, gguf_find_key(ctx, key), target, ovrd);
        }

        static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
            return set(ctx, key.c_str(), target, ovrd);
        }
    };
}
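
// Usage sketch for the machinery above (hypothetical key): read an optional
// uint32_t, honoring a user override when one is supplied:
//
//     uint32_t n_layer = 0;
//     GGUFMeta::GKV<uint32_t>::set(meta, "llama.block_count", n_layer, /*ovrd=*/nullptr);
//
// set() returns false only when the key is absent and no override applied;
// a type mismatch throws instead of silently mis-reading the value.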

using llama_buf_map = std::unordered_map<uint32_t, ggml_backend_buffer_t>;

struct llama_model_loader {
    int n_kv      = 0;
    int n_tensors = 0;
    int n_created = 0;

    int64_t n_elements = 0;
    size_t  n_bytes    = 0;

    bool use_mmap = false;
    bool check_tensors;

    llama_files files;
    llama_ftype ftype;
    llama_fver  fver;

    llama_mmaps mappings;

    // Holds information on a model weight
    struct llama_tensor_weight {
        uint16_t  idx; // source file index
        size_t   offs; // tensor data offset in the original file

        ggml_tensor * tensor;

        llama_tensor_weight(const llama_file * file, uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
            const int tensor_idx = gguf_find_tensor(gguf_ctx, name);
            offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);

            if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
                throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", name));
            }
        }
    };

    std::vector<llama_tensor_weight> weights;

    std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;

    struct gguf_context * meta = NULL;
    std::vector<ggml_context *> contexts;

    std::string arch_name;
    LLM_KV      llm_kv    = LLM_KV(LLM_ARCH_UNKNOWN);

    llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
        int trace = 0;
        if (getenv("LLAMA_TRACE")) {
            trace = atoi(getenv("LLAMA_TRACE"));
        }

        if (param_overrides_p != nullptr) {
            for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
                kv_overrides.insert({std::string(p->key), *p});
            }
        }

        struct ggml_context * ctx = NULL;
        struct gguf_init_params params = {
            /*.no_alloc = */ true,
            /*.ctx      = */ &ctx,
        };

        meta = gguf_init_from_file(fname.c_str(), params);
        if (!meta) {
            throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
        }

        get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
        llm_kv = LLM_KV(llm_arch_from_string(arch_name));

        files.emplace_back(new llama_file(fname.c_str(), "rb"));
        contexts.emplace_back(ctx);

        // Save the tensor data offsets of the main file.
        // For subsidiary files, the tensor data offsets in `meta` must not be used,
        // so we build a unified tensors index for the weights.
        for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
            weights.emplace_back(files.back().get(), 0, cur->name, meta, cur);
        }

        uint16_t n_split = 0;
        get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);

        // Load additional GGML contexts
        if (n_split > 1) {
            uint16_t idx = 0;
            get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
            if (idx != 0) {
                throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
            }

            char split_prefix[PATH_MAX] = {0};
            if (!llama_split_prefix(split_prefix, sizeof(split_prefix), fname.c_str(), idx, n_split)) {
                throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
            }

            if (trace > 0) {
                LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
            }

            char split_path[PATH_MAX] = {0};
            for (idx = 1; idx < n_split; idx++) {
                llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split);

                struct gguf_init_params split_params = {
                    /*.no_alloc = */ true,
                    /*.ctx      = */ &ctx,
                };
                struct gguf_context * ctx_gguf = gguf_init_from_file(split_path, split_params);
                if (!ctx_gguf) {
                    throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path));
                }

                files.emplace_back(new llama_file(split_path, "rb"));
                contexts.emplace_back(ctx);

                // Save the tensor data offset info of the shard.
                for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
                    weights.emplace_back(files.back().get(), idx, cur->name, ctx_gguf, cur);
                }

                gguf_free(ctx_gguf);
            }

            get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);

            // sanity check
            {
                const int n_tensors_loaded = (int) weights.size();
                if (n_tensors != n_tensors_loaded) {
                    throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
                }
            }

            LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1);
        }
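
        // For reference: llama_split_path composes shard names following the
        // "%s-%05d-of-%05d.gguf" convention, e.g. "model-00002-of-00003.gguf"
        // (hypothetical name) for the second of three shards; only the first
        // shard may be passed to this loader, which then resolves the rest.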

        n_kv      = gguf_get_n_kv(meta);
        n_tensors = weights.size();

        fver = (enum llama_fver) gguf_get_version(meta);

        std::set<std::string> tensor_names;
        for (auto & w : weights) {
            n_elements += ggml_nelements(w.tensor);
            n_bytes    += ggml_nbytes(w.tensor);

            // make sure there are no duplicated tensor names
            const std::string name(w.tensor->name);
            auto found = tensor_names.find(name);
            if (found != tensor_names.end()) {
                throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", w.tensor->name));
            }
            tensor_names.insert(name);
        }

        LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
                __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));

        // determine file type based on the number of tensors for each quantization and print meta data
        // TODO: make optional
        {
            std::map<enum ggml_type, uint32_t> n_type;

            uint32_t n_type_max = 0;
            enum ggml_type type_max = GGML_TYPE_F32;

            for (int i = 0; i < n_tensors; i++) {
                const ggml_tensor * tensor = weights.at(i).tensor;
                enum ggml_type type = tensor->type;

                n_type[type]++;

                if (n_type_max < n_type[type]) {
                    n_type_max = n_type[type];
                    type_max   = type;
                }

                if (trace > 0) {
                    const uint16_t sid = weights.at(i).idx;
                    LLAMA_LOG_INFO("%s: - tensor %4d, split %2d: %32s %-8s [ %s ]\n", __func__, i, sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
                }
            }

            switch (type_max) {
                case GGML_TYPE_F32:     ftype = LLAMA_FTYPE_ALL_F32;        break;
                case GGML_TYPE_F16:     ftype = LLAMA_FTYPE_MOSTLY_F16;     break;
                case GGML_TYPE_BF16:    ftype = LLAMA_FTYPE_MOSTLY_BF16;    break;
                case GGML_TYPE_Q4_0:    ftype = LLAMA_FTYPE_MOSTLY_Q4_0;    break;
                case GGML_TYPE_Q4_1:    ftype = LLAMA_FTYPE_MOSTLY_Q4_1;    break;
                case GGML_TYPE_Q5_0:    ftype = LLAMA_FTYPE_MOSTLY_Q5_0;    break;
                case GGML_TYPE_Q5_1:    ftype = LLAMA_FTYPE_MOSTLY_Q5_1;    break;
                case GGML_TYPE_Q8_0:    ftype = LLAMA_FTYPE_MOSTLY_Q8_0;    break;
                case GGML_TYPE_Q2_K:    ftype = LLAMA_FTYPE_MOSTLY_Q2_K;    break;
                case GGML_TYPE_Q3_K:    ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M;  break;
                case GGML_TYPE_Q4_K:    ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;  break;
                case GGML_TYPE_Q5_K:    ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M;  break;
                case GGML_TYPE_Q6_K:    ftype = LLAMA_FTYPE_MOSTLY_Q6_K;    break;
                case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
                case GGML_TYPE_IQ2_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS;  break;
                case GGML_TYPE_IQ2_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ2_S;   break;
                case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
                case GGML_TYPE_IQ1_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_S;   break;
                case GGML_TYPE_IQ1_M:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_M;   break;
                case GGML_TYPE_IQ4_NL:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL;  break;
                case GGML_TYPE_IQ4_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS;  break;
                case GGML_TYPE_IQ3_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ3_S;   break;
                default:
                    {
                        LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
                        ftype = LLAMA_FTYPE_ALL_F32;
                    } break;
            }

            // this is a way to mark that we have "guessed" the file type
            ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);

            {
                const int kid = gguf_find_key(meta, "general.file_type");
                if (kid >= 0) {
                    ftype = (llama_ftype) gguf_get_val_u32(meta, kid);
                }
            }

            LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);

            for (int i = 0; i < n_kv; i++) {
                const char * name           = gguf_get_key(meta, i);
                const enum gguf_type type   = gguf_get_kv_type(meta, i);
                const std::string type_name =
                    type == GGUF_TYPE_ARRAY
                    ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta, i)), gguf_get_arr_n(meta, i))
                    : gguf_type_name(type);

                std::string value          = gguf_kv_to_str(meta, i);
                const size_t MAX_VALUE_LEN = 40;
                if (value.size() > MAX_VALUE_LEN) {
                    value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
                }
                replace_all(value, "\n", "\\n");

                LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
            }

            // print type counts
            for (auto & kv : n_type) {
                if (kv.second == 0) {
                    continue;
                }

                LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
            }
        }

        if (!llama_mmap::SUPPORTED) {
            LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
            use_mmap = false;
        }

        this->use_mmap = use_mmap;
        this->check_tensors = check_tensors;
    }

    ~llama_model_loader() {
        if (meta) {
            gguf_free(meta);
        }
        for (auto * ctx : contexts) {
            ggml_free(ctx);
        }
    }

    template<typename T>
    typename std::enable_if<std::is_integral<T>::value, bool>::type
    get_arr_n(const std::string & key, T & result, const bool required = true) {
        const int kid = gguf_find_key(meta, key.c_str());

        if (kid < 0) {
            if (required) {
                throw std::runtime_error(format("key not found in model: %s", key.c_str()));
            }
            return false;
        }

        struct GGUFMeta::ArrayInfo arr_info =
            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta, kid);

        result = arr_info.length;
        return true;
    }

    template<typename T>
    typename std::enable_if<std::is_integral<T>::value, bool>::type
    get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
        return get_arr_n(llm_kv(kid), result, required);
    }

    template<typename T>
    bool get_key(const std::string & key, T & result, const bool required = true) {
        auto it = kv_overrides.find(key);

        const struct llama_model_kv_override * override =
            it != kv_overrides.end() ? &it->second : nullptr;

        const bool found = GGUFMeta::GKV<T>::set(meta, key, result, override);

        if (required && !found) {
            throw std::runtime_error(format("key not found in model: %s", key.c_str()));
        }

        return found;
    }

    template<typename T>
    bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
        return get_key(llm_kv(kid), result, required);
    }
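
    // Usage sketch: the hparams further below are read through these helpers:
    //
    //     uint32_t n_embd = 0;
    //     ml.get_key(LLM_KV_EMBEDDING_LENGTH, n_embd);          // throws if absent
    //     ml.get_key(LLM_KV_EXPERT_COUNT,     n_expert, false); // optional
    //
    // where the llm_kv functor maps the enum onto the arch-specific GGUF key
    // string before the lookup.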

    std::string get_arch_name() const {
        return arch_name;
    }

    enum llm_arch get_arch() const {
        return llm_kv.arch;
    }

    const char * get_tensor_name(int i) const {
        return weights.at(i).tensor->name;
    }

    const llama_tensor_weight * get_weight(const char * name) const {
        for (const auto & weight : weights) {
            if (strcmp(name, weight.tensor->name) == 0) {
                return &weight;
            }
        }
        return nullptr;
    }

    const llama_tensor_weight * get_weight(int i) const {
        return get_weight(get_tensor_name(i));
    }

    const llama_tensor_weight & require_weight(const char * name) const {
        const llama_tensor_weight * weight = get_weight(name);
        if (!weight) {
            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
        }
        return *weight;
    }

    struct ggml_tensor * get_tensor_meta(const char * name) const {
        const auto * weight = get_weight(name);
        if (!weight) {
            return nullptr;
        }
        return weight->tensor;
    }

    struct ggml_tensor * require_tensor_meta(const char * name) const {
        struct ggml_tensor * tensor = get_tensor_meta(name);
        if (!tensor) {
            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
        }
        return tensor;
    }

    struct ggml_tensor * get_tensor_meta(int i) const {
        return get_tensor_meta(get_tensor_name(i));
    }

    struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, const struct ggml_tensor * cur) {
        struct ggml_tensor * tensor = ggml_dup_tensor(ctx, cur);
        ggml_set_name(tensor, ggml_get_name(cur));

        n_created++;

        return tensor;
    }

    const struct ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const {
        const struct ggml_tensor * cur = get_tensor_meta(name.c_str());

        if (cur == NULL) {
            if (!required) {
                return NULL;
            }
            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
        }

        {
            bool is_ok = true;
            for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
                if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) {
                    is_ok = false;
                    break;
                }
            }
            if (!is_ok) {
                throw std::runtime_error(
                        format("%s: tensor '%s' has wrong shape; expected %s, got %s",
                            __func__, name.c_str(),
                            llama_format_tensor_shape(ne).c_str(),
                            llama_format_tensor_shape(cur).c_str()));
            }
        }

        return cur;
    }

    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, bool required = true) {
        const struct ggml_tensor * cur = check_tensor_dims(name, ne, required);

        if (cur == NULL) {
            return NULL;
        }

        return create_tensor_for(ctx, cur);
    }

    struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::vector<int64_t> & ne, size_t offset, bool required = true) {
        const struct ggml_tensor * cur = check_tensor_dims(name, ne, required);

        if (cur == NULL) {
            return NULL;
        }

        if (cur->type != base->type) {
            throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), ggml_type_name(base->type), ggml_type_name(cur->type)));
        }

        std::array<int64_t, GGML_MAX_DIMS> dims;
        for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
            dims[i] = i < ne.size() ? ne[i] : 1;
        }

        struct ggml_tensor * tensor = ggml_view_4d(ctx, base,
                                        dims[0], dims[1], dims[2], dims[3],
                                        cur->nb[1], cur->nb[2], cur->nb[3],
                                        offset);

        ggml_set_name(tensor, name.c_str());

        n_created++;

        return tensor;
    }
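
    // Note: the result above is a ggml view, so it aliases `base`'s storage
    // instead of owning data of its own; it still counts toward n_created,
    // so done_getting_tensors() treats views and regular tensors alike.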

    void done_getting_tensors() const {
        if (n_created != n_tensors) {
            throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
        }
    }

    void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr) {
        if (use_mmap) {
            mappings.reserve(files.size());
            mmaps_used.reserve(files.size());
            for (const auto & file : files) {
                std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, ggml_is_numa()));
                mmaps_used.emplace_back(mapping->size, 0);
                if (mlock_mmaps) {
                    std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
                    mlock_mmap->init(mapping->addr);
                    mlock_mmaps->emplace_back(std::move(mlock_mmap));
                }
                mappings.emplace_back(std::move(mapping));
            }
        }

        // compute the total size of all tensors for progress reporting
        for (auto & w : weights) {
            size_data += ggml_nbytes(w.tensor);
        }
    }

    void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const {
        GGML_ASSERT(!mappings.empty());
        const auto & mapping = mappings.at(idx);

        *first = mapping->size;
        *last  = 0;
        *addr  = mapping->addr;
        for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) {
            try {
                const auto * weight = get_weight(ggml_get_name(tensor));
                if (!weight) {
                    continue;
                }
                if (weight->idx != idx) {
                    continue;
                }
                *first = std::min(*first, weight->offs);
                *last  = std::max(*last,  weight->offs + ggml_nbytes(tensor));
            } catch(...) {
                // the tensor is not in the model
            }
        }
    }

    // for backwards compatibility, does not support ggml-backend
    void load_data_for(struct ggml_tensor * cur) const {
        const auto & w = require_weight(ggml_get_name(cur));

        if (use_mmap) {
            const auto & mapping = mappings.at(w.idx);
            if (cur->data == nullptr) {
                cur->data = (uint8_t *)mapping->addr + w.offs;
            } else {
                memcpy(cur->data, (uint8_t *)mapping->addr + w.offs, ggml_nbytes(cur));
            }
        } else {
            GGML_ASSERT(cur->data != nullptr);
            GGML_ASSERT(w.idx < files.size());
            const auto & file = files.at(w.idx);
            file->seek(w.offs, SEEK_SET);
            file->read_raw(cur->data, ggml_nbytes(cur));
        }

        if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) {
            throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
        }
    }

    size_t size_done = 0;
    size_t size_data = 0;
    std::vector<std::pair<size_t, size_t>> mmaps_used;

    // Returns false if cancelled by progress_callback
    bool load_all_data(
            struct ggml_context * ctx,
            llama_buf_map & bufs_mmap,
            llama_mlocks * lmlocks,
            llama_progress_callback progress_callback,
            void * progress_callback_user_data) {
        GGML_ASSERT(size_data != 0 && "call init_mappings() first");

        std::vector<no_init<uint8_t>> read_buf;
        std::vector<std::future<std::pair<ggml_tensor *, bool>>> validation_result;

        for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
            const auto * weight = get_weight(ggml_get_name(cur));
            if (weight == nullptr) {
                // this can happen with split experts models
                continue;
            }

            if (progress_callback) {
                if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
                    return false;
                }
            }

            size_t n_size = ggml_nbytes(cur);

            if (use_mmap) {
                const auto & mapping = mappings.at(weight->idx);
                ggml_backend_buffer_t buf_mmap = nullptr;
                if (bufs_mmap.count(weight->idx)) {
                    buf_mmap = bufs_mmap.at(weight->idx);
                }
                uint8_t * data = (uint8_t *) mapping->addr + weight->offs;

                if (check_tensors) {
                    validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
                        return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size));
                    }));
                }

                GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
                if (buf_mmap && cur->data == nullptr) {
                    ggml_backend_tensor_alloc(buf_mmap, cur, data);
                    if (lmlocks) {
                        const auto & lmlock = lmlocks->at(weight->idx);
                        lmlock->grow_to(weight->offs + n_size);
                    }

                    auto & mmap_used = mmaps_used[weight->idx];
                    mmap_used.first  = std::min(mmap_used.first,  weight->offs);
                    mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
                } else {
                    ggml_backend_tensor_set(cur, data, 0, n_size);
                }
            } else {
                GGML_ASSERT(weight->idx < files.size());
                const auto & file = files.at(weight->idx);
                if (ggml_backend_buffer_is_host(cur->buffer)) {
                    file->seek(weight->offs, SEEK_SET);
                    file->read_raw(cur->data, n_size);
                    if (check_tensors) {
                        validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
                            return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size));
                        }));
                    }
                } else {
                    read_buf.resize(n_size);
                    file->seek(weight->offs, SEEK_SET);
                    file->read_raw(read_buf.data(), n_size);
                    ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
                    if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
                        throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
                    }
                }
            }

            size_done += n_size;
        }

        // check validation results
        bool validation_failed = false;
        for (auto & future : validation_result) {
            auto result = future.get();
            if (!result.second) {
                LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first));
                validation_failed = true;
            }
        }
        if (validation_failed) {
            throw std::runtime_error("found tensors with invalid data");
        }

        // check if this is the last call and do final cleanup
        if (size_done >= size_data) {
            // unmap offloaded tensors and metadata
            if (use_mmap) {
                for (uint32_t idx = 0; idx < mappings.size(); idx++) {
                    const auto & mmap_used = mmaps_used.at(idx);
                    auto & mapping = mappings.at(idx);
                    mapping->unmap_fragment(0, mmap_used.first);
                    if (mmap_used.second != 0) {
                        mapping->unmap_fragment(mmap_used.second, mapping->size);
                    }
                }
            }
            if (progress_callback) {
                // Even though the model is done loading, we still honor
                // cancellation since we need to free allocations.
                return progress_callback(1.0f, progress_callback_user_data);
            }
        }

        return true;
    }
};
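
// Typical lifecycle of the loader, pieced together from the assertions above
// (hypothetical driver code):
//
//     llama_model_loader ml(fname, use_mmap, check_tensors, overrides);
//     ml.init_mappings();                       // required before load_all_data()
//     // ... ml.create_tensor(...) for every expected weight ...
//     ml.done_getting_tensors();                // verifies nothing was missed
//     ml.load_all_data(ctx, bufs, mlocks, progress_cb, progress_cb_ud);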

template<>
bool llama_model_loader::get_key(const enum llm_kv kid, enum llama_pooling_type & result, const bool required) {
    uint32_t tmp;
    const bool found = get_key(kid, tmp, required);
    if (found) {
        result = (enum llama_pooling_type) tmp;
    } else {
        result = LLAMA_POOLING_TYPE_UNSPECIFIED;
    }
    return found;
}

//
// load LLaMA models
//

static const char * llama_model_arch_name(llm_arch arch) {
    auto it = LLM_ARCH_NAMES.find(arch);
    if (it == LLM_ARCH_NAMES.end()) {
        return "unknown";
    }
    return it->second;
}

static std::string llama_model_ftype_name(llama_ftype ftype) {
    if (ftype & LLAMA_FTYPE_GUESSED) {
        return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
    }

    switch (ftype) {
        case LLAMA_FTYPE_ALL_F32:     return "all F32";
        case LLAMA_FTYPE_MOSTLY_F16:  return "F16";
        case LLAMA_FTYPE_MOSTLY_BF16: return "BF16";
        case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
        case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
                                      return "Q4_1, some F16";
        case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
        case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
        case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";

        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K:    return "Q2_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q2_K_S:  return "Q2_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_S:  return "Q3_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_M:  return "Q3_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q3_K_L:  return "Q3_K - Large";
        case LLAMA_FTYPE_MOSTLY_Q4_K_S:  return "Q4_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q4_K_M:  return "Q4_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q5_K_S:  return "Q5_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  return "Q5_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q6_K:    return "Q6_K";
        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  return "IQ2_XS - 2.3125 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ2_S:   return "IQ2_S - 2.5 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ2_M:   return "IQ2_M - 2.7 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ3_XS:  return "IQ3_XS - 3.3 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ1_S:   return "IQ1_S - 1.5625 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ1_M:   return "IQ1_M - 1.75 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ4_NL:  return "IQ4_NL - 4.5 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ4_XS:  return "IQ4_XS - 4.25 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ3_S:   return "IQ3_S - 3.4375 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ3_M:   return "IQ3_S mix - 3.66 bpw";

        default: return "unknown, may not work";
    }
}

static const char * llama_model_type_name(e_model type) {
    switch (type) {
        case MODEL_22M:    return "22M";
        case MODEL_33M:    return "33M";
        case MODEL_109M:   return "109M";
        case MODEL_137M:   return "137M";
        case MODEL_0_5B:   return "0.5B";
        case MODEL_1B:     return "1B";
        case MODEL_2B:     return "2B";
        case MODEL_3B:     return "3B";
        case MODEL_7B:     return "7B";
        case MODEL_8B:     return "8B";
        case MODEL_12B:    return "12B";
        case MODEL_13B:    return "13B";
        case MODEL_14B:    return "14B";
        case MODEL_15B:    return "15B";
        case MODEL_20B:    return "20B";
        case MODEL_30B:    return "30B";
        case MODEL_34B:    return "34B";
        case MODEL_35B:    return "35B";
        case MODEL_40B:    return "40B";
        case MODEL_65B:    return "65B";
        case MODEL_70B:    return "70B";
        case MODEL_314B:   return "314B";
        case MODEL_SMALL:  return "0.1B";
        case MODEL_MEDIUM: return "0.4B";
        case MODEL_LARGE:  return "0.8B";
        case MODEL_XL:     return "1.5B";
        case MODEL_A2_7B:  return "A2.7B";
        case MODEL_8x7B:   return "8x7B";
        case MODEL_8x22B:  return "8x22B";
        case MODEL_16x12B: return "16x12B";
        default:           return "?B";
    }
}

static const char * llama_model_vocab_type_name(enum llama_vocab_type type) {
    switch (type) {
        case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
        case LLAMA_VOCAB_TYPE_SPM:  return "SPM";
        case LLAMA_VOCAB_TYPE_BPE:  return "BPE";
        case LLAMA_VOCAB_TYPE_WPM:  return "WPM";
        default:                    return "unknown";
    }
}

static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
    model.arch = ml.get_arch();
    if (model.arch == LLM_ARCH_UNKNOWN) {
        throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
    }
}

static void llm_load_hparams(
        llama_model_loader & ml,
        llama_model & model) {
    auto & hparams = model.hparams;
    const gguf_context * ctx = ml.meta;

    // get metadata as string
    for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
        enum gguf_type type = gguf_get_kv_type(ctx, i);
        if (type == GGUF_TYPE_ARRAY) {
            continue;
        }
        const char * name = gguf_get_key(ctx, i);
        const std::string value = gguf_kv_to_str(ctx, i);
        model.gguf_kv.emplace(name, value);
    }

    // get general kv
    ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);

    // get hparams kv
    ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);

    // everything past this point is not vocab-related
    if (hparams.vocab_only) {
        return;
    }

    ml.get_key(LLM_KV_CONTEXT_LENGTH,       hparams.n_ctx_train);
    ml.get_key(LLM_KV_EMBEDDING_LENGTH,     hparams.n_embd);
    ml.get_key(LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff);
    ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head);
    ml.get_key(LLM_KV_BLOCK_COUNT,          hparams.n_layer);
    ml.get_key(LLM_KV_EXPERT_COUNT,         hparams.n_expert,      false);
    ml.get_key(LLM_KV_EXPERT_USED_COUNT,    hparams.n_expert_used, false);

    GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
    GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
    if (hparams.n_expert > 0) {
        GGML_ASSERT(hparams.n_expert_used > 0);
    } else {
        GGML_ASSERT(hparams.n_expert_used == 0);
    }

    // n_head_kv is optional, default to n_head
    hparams.n_head_kv = hparams.n_head;
    ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv, false);

    bool rope_finetuned = false;
    ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
    hparams.rope_finetuned = rope_finetuned;

    hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
    ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_yarn_orig_ctx, false);

    // rope_freq_base (optional)
    hparams.rope_freq_base_train = 10000.0f;
    ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);

    std::string rope_scaling("linear");
    ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
    hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
    GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);

    // rope_freq_scale (inverse of the kv) is optional
    float ropescale = 0.0f;
    if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
        // try the old key name
        ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
    }
    hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
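
    // Example of the inversion above: a GGUF storing a rope scaling factor of
    // 4.0 (context extended 4x) yields rope_freq_scale_train = 1/4.0 = 0.25,
    // while a missing or zero factor leaves the neutral scale of 1.0f.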

    // sanity check for n_rot (optional)
    {
        hparams.n_rot = (hparams.n_head == 0) ? 0 : hparams.n_embd / hparams.n_head;

        ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);

        if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
            if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
                throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
            }
        }
        // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
        // gpt-j    n_rot = rotary_dim
    }

    hparams.n_embd_head_k = (hparams.n_head == 0) ? 0 : hparams.n_embd / hparams.n_head;
    ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);

    hparams.n_embd_head_v = (hparams.n_head == 0) ? 0 : hparams.n_embd / hparams.n_head;
    ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);

    // arch-specific KVs
    switch (model.arch) {
        case LLM_ARCH_LLAMA:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                if (hparams.n_expert == 8) {
                    switch (hparams.n_layer) {
                        case 32: model.type = e_model::MODEL_8x7B; break;
                        case 56: model.type = e_model::MODEL_8x22B; break;
                        default: model.type = e_model::MODEL_UNKNOWN;
                    }
                } else {
                    switch (hparams.n_layer) {
                        case 22: model.type = e_model::MODEL_1B; break;
                        case 26: model.type = e_model::MODEL_3B; break;
                        case 32: model.type = hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B; break;
                        case 40: model.type = e_model::MODEL_13B; break;
                        case 48: model.type = e_model::MODEL_34B; break;
                        case 60: model.type = e_model::MODEL_30B; break;
                        case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
                        default: model.type = e_model::MODEL_UNKNOWN;
                    }
                }
            } break;
        case LLM_ARCH_MINICPM:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 40: model.type = e_model::MODEL_2B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_GROK:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 64: model.type = e_model::MODEL_314B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_FALCON:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 60: model.type = e_model::MODEL_40B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_BAICHUAN:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = e_model::MODEL_13B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }

                if (model.type == e_model::MODEL_13B) {
                    // TODO: become GGUF KV parameter
                    hparams.f_max_alibi_bias = 8.0f;
                }
            } break;
        case LLM_ARCH_STARCODER:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 36: model.type = e_model::MODEL_3B; break;
                    case 42: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = e_model::MODEL_15B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_REFACT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_1B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }

                // TODO: become GGUF KV parameter
                hparams.f_max_alibi_bias = 8.0f;
            } break;
        case LLM_ARCH_BERT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);

                switch (hparams.n_layer) {
                    case 3:
                        model.type = e_model::MODEL_17M; break; // bge-micro
                    case 6:
                        model.type = e_model::MODEL_22M; break; // MiniLM-L6
                    case 12:
                        switch (hparams.n_embd) {
                            case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small
                            case 768: model.type = e_model::MODEL_109M; break; // bge-base
                        } break;
                    case 24:
                        model.type = e_model::MODEL_335M; break; // bge-large
                }
            } break;
        case LLM_ARCH_JINA_BERT_V2:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type);
                hparams.f_max_alibi_bias = 8.0f;

                switch (hparams.n_layer) {
                    case 4:  model.type = e_model::MODEL_33M;  break; // jina-embeddings-small
                    case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base
                }
            } break;
        case LLM_ARCH_NOMIC_BERT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type);

                if (hparams.n_layer == 12 && hparams.n_embd == 768) {
                    model.type = e_model::MODEL_137M;
                }
            } break;
        case LLM_ARCH_BLOOM:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 30:
                        switch (hparams.n_embd) {
                            case 2560: model.type = e_model::MODEL_3B; break;
                            case 4096: model.type = e_model::MODEL_7B; break;
                        } break;
                }

                // TODO: become GGUF KV parameter
                hparams.f_max_alibi_bias = 8.0f;
            } break;
        case LLM_ARCH_MPT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,      hparams.f_clamp_kqv, false);
                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);

                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 48: model.type = e_model::MODEL_30B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_STABLELM:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 32: model.type = e_model::MODEL_3B; break;
                    case 40: model.type = e_model::MODEL_12B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_QWEN:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = e_model::MODEL_13B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_QWEN2:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break;
                    case 32: model.type = e_model::MODEL_7B; break;
                    case 40: model.type = hparams.n_head == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break;
                    case 80: model.type = e_model::MODEL_70B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_QWEN2MOE:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_A2_7B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_PHI2:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);

                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 32: model.type = e_model::MODEL_3B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_PHI3:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 32: model.type = e_model::MODEL_3B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_PLAMO:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                switch (hparams.n_layer) {
                    case 40: model.type = e_model::MODEL_13B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
        case LLM_ARCH_GPT2:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
                switch (hparams.n_layer) {
                    case 12: model.type = e_model::MODEL_SMALL; break;
                    case 24: model.type = e_model::MODEL_MEDIUM; break;
                    case 36: model.type = e_model::MODEL_LARGE; break;
                    case 48: model.type = e_model::MODEL_XL; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
  3599. case LLM_ARCH_CODESHELL:
  3600. {
  3601. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3602. switch (hparams.n_layer) {
  3603. case 42: model.type = e_model::MODEL_SMALL; break;
  3604. default: model.type = e_model::MODEL_UNKNOWN;
  3605. }
  3606. } break;
  3607. case LLM_ARCH_ORION:
  3608. {
  3609. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3610. switch (hparams.n_layer) {
  3611. case 40: model.type = e_model::MODEL_14B; break;
  3612. default: model.type = e_model::MODEL_UNKNOWN;
  3613. }
  3614. } break;
  3615. case LLM_ARCH_INTERNLM2:
  3616. {
  3617. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3618. switch (hparams.n_layer) {
  3619. case 32: model.type = e_model::MODEL_7B; break;
  3620. case 48: model.type = e_model::MODEL_20B; break;
  3621. default: model.type = e_model::MODEL_UNKNOWN;
  3622. }
  3623. } break;
  3624. case LLM_ARCH_GEMMA:
  3625. {
  3626. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3627. switch (hparams.n_layer) {
  3628. case 18: model.type = e_model::MODEL_2B; break;
  3629. case 28: model.type = e_model::MODEL_7B; break;
  3630. default: model.type = e_model::MODEL_UNKNOWN;
  3631. }
  3632. } break;
  3633. case LLM_ARCH_STARCODER2:
  3634. {
  3635. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3636. switch (hparams.n_layer) {
  3637. case 30: model.type = e_model::MODEL_3B; break;
  3638. case 32: model.type = e_model::MODEL_7B; break;
  3639. case 40: model.type = e_model::MODEL_15B; break;
  3640. default: model.type = e_model::MODEL_UNKNOWN;
  3641. }
  3642. } break;
  3643. case LLM_ARCH_MAMBA:
  3644. {
  3645. ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
  3646. ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
  3647. ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
  3648. ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
  3649. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3650. switch (hparams.n_layer) {
  3651. case 24:
  3652. switch (hparams.n_embd) {
  3653. case 768: model.type = e_model::MODEL_SMALL; break;
  3654. default: model.type = e_model::MODEL_UNKNOWN;
  3655. } break;
  3656. case 48:
  3657. switch (hparams.n_embd) {
  3658. case 1024: model.type = e_model::MODEL_MEDIUM; break;
  3659. case 1536: model.type = e_model::MODEL_LARGE; break;
  3660. case 2048: model.type = e_model::MODEL_XL; break;
  3661. default: model.type = e_model::MODEL_UNKNOWN;
  3662. } break;
  3663. case 64:
  3664. switch (hparams.n_embd) {
  3665. case 2560: model.type = e_model::MODEL_3B; break;
  3666. default: model.type = e_model::MODEL_UNKNOWN;
  3667. } break;
  3668. default: model.type = e_model::MODEL_UNKNOWN;
  3669. }
  3670. } break;
  3671. case LLM_ARCH_XVERSE:
  3672. {
  3673. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3674. switch (hparams.n_layer) {
  3675. case 32: model.type = e_model::MODEL_7B; break;
  3676. case 40: model.type = e_model::MODEL_13B; break;
  3677. case 80: model.type = e_model::MODEL_65B; break;
  3678. default: model.type = e_model::MODEL_UNKNOWN;
  3679. }
  3680. } break;
  3681. case LLM_ARCH_COMMAND_R:
  3682. {
  3683. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  3684. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3685. switch (hparams.n_layer) {
  3686. case 40: model.type = e_model::MODEL_35B; break;
  3687. default: model.type = e_model::MODEL_UNKNOWN;
  3688. }
  3689. } break;
  3690. case LLM_ARCH_DBRX:
  3691. {
  3692. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3693. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
  3694. switch (hparams.n_layer) {
  3695. case 40: model.type = e_model::MODEL_16x12B; break;
  3696. default: model.type = e_model::MODEL_UNKNOWN;
  3697. }
  3698. } break;
  3699. case LLM_ARCH_OLMO:
  3700. {
  3701. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3702. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  3703. switch (hparams.n_layer) {
  3704. case 22: model.type = e_model::MODEL_1B; break;
  3705. case 32: model.type = e_model::MODEL_7B; break;
  3706. case 80: model.type = e_model::MODEL_70B; break;
  3707. default: model.type = e_model::MODEL_UNKNOWN;
  3708. }
  3709. } break;
  3710. default: (void)0;
  3711. }
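    // architectures without a case above keep the generic hparams read earlier;
    // model.type is left at its default and reported as such by llm_load_print_meta()
    // (assumption: the default is MODEL_UNKNOWN, matching the explicit fallbacks above)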
    model.ftype = ml.ftype;

    if (hparams.f_max_alibi_bias > 0.0f) {
        hparams.use_alibi = true;
    }

    hparams.rope_type = llama_rope_type(&model);
}
// TODO: This should probably be in llama.h
static std::vector<llama_vocab::id> llama_tokenize_internal(
    const llama_vocab & vocab, std::string raw_text, bool add_special, bool parse_special = false
);
static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);
static void llm_load_vocab(
        llama_model_loader & ml,
        llama_model & model) {
    auto & vocab = model.vocab;

    struct gguf_context * ctx = ml.meta;

    const auto kv = LLM_KV(model.arch);

    // determine vocab type
    {
        std::string tokenizer_model;
        std::string tokenizer_pre;

        ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model);
        ml.get_key(LLM_KV_TOKENIZER_PRE,   tokenizer_pre, false);

        if (tokenizer_model == "no_vocab") {
            vocab.type = LLAMA_VOCAB_TYPE_NONE;

            // default special tokens
            vocab.special_bos_id  = -1;
            vocab.special_eos_id  = -1;
            vocab.special_unk_id  = -1;
            vocab.special_sep_id  = -1;
            vocab.special_pad_id  = -1;
            vocab.special_cls_id  = -1;
            vocab.special_mask_id = -1;
            vocab.linefeed_id     = -1;

            return;
        } else if (tokenizer_model == "llama") {
            vocab.type = LLAMA_VOCAB_TYPE_SPM;

            // default special tokens
            vocab.special_bos_id  = 1;
            vocab.special_eos_id  = 2;
            vocab.special_unk_id  = 0;
            vocab.special_sep_id  = -1;
            vocab.special_pad_id  = -1;
            vocab.special_cls_id  = -1;
            vocab.special_mask_id = -1;

            // For Fill-In-the-Middle (FIM)/infill models which were converted
            // prior to support of FIM special tokens in GGUF, the following
            // will allow those models to continue to work. The general names
            // of the known models are currently CodeLlama (LLM_ARCH_LLAMA) and
            // CodeGemma (LLM_ARCH_GEMMA). This can potentially be removed once
            // new versions of these models have been published.
            std::string gen_name;
            ml.get_key(LLM_KV_GENERAL_NAME, gen_name, false);

            std::transform(gen_name.begin(), gen_name.end(), gen_name.begin(),
                [](unsigned char c){ return std::tolower(c); });

            if (gen_name.find("code") != std::string::npos) {
                if (model.arch == LLM_ARCH_LLAMA) {
                    vocab.special_prefix_id = 32007;
                    vocab.special_suffix_id = 32008;
                    vocab.special_middle_id = 32009;
                    vocab.special_eot_id    = 32010;
                } else if (model.arch == LLM_ARCH_GEMMA) {
                    vocab.special_prefix_id = 67;
                    vocab.special_suffix_id = 69;
                    vocab.special_middle_id = 68;
                    // TODO: this is not EOT, it is "file separator" token, needs fix
                    //       https://huggingface.co/google/codegemma-7b-it/blob/9b1d9231388358c04d90bd003458f5070d97db44/tokenizer_config.json#L565-L572
                    //vocab.special_eot_id    = 70;
                    vocab.special_eot_id    = 107;
                }
            }

            const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
            if (add_space_prefix_keyidx != -1) {
                vocab.add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
            } // The default value of add_space_prefix is true.
        } else if (tokenizer_model == "bert") {
            vocab.type = LLAMA_VOCAB_TYPE_WPM;

            // default special tokens
            vocab.special_bos_id  = -1;
            vocab.special_eos_id  = -1;
            vocab.special_unk_id  = 100;
            vocab.special_sep_id  = 102;
            vocab.special_pad_id  = 0;
            vocab.special_cls_id  = 101;
            vocab.special_mask_id = 103;

            vocab.add_space_prefix = false;
        } else {
            if (tokenizer_model == "gpt2") {
                vocab.type = LLAMA_VOCAB_TYPE_BPE;
            } else {
                LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'\n", __func__, tokenizer_model.c_str());
                LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'\n", __func__);

                vocab.type = LLAMA_VOCAB_TYPE_SPM;

                return;
            }

            // read bpe merges and populate bpe ranks
            const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
            if (merges_keyidx == -1) {
                throw std::runtime_error("cannot find tokenizer merges in model file\n");
            }
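            // each merge entry is a space-separated pair such as "h e" -> ("h", "e");
            // its index in the array is the merge rank (lower rank = applied earlier)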
            const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);

            for (int i = 0; i < n_merges; i++) {
                const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
                GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);

                std::string first;
                std::string second;

                const size_t pos = word.find(' ', 1);

                if (pos != std::string::npos) {
                    first  = word.substr(0, pos);
                    second = word.substr(pos + 1);
                }

                vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
            }

            // default special tokens
            vocab.special_bos_id  = 11;
            vocab.special_eos_id  = 11;
            vocab.special_unk_id  = -1;
            vocab.special_sep_id  = -1;
            vocab.special_pad_id  = -1;
            vocab.special_cls_id  = -1;
            vocab.special_mask_id = -1;
        }

        // for now, only BPE models have pre-tokenizers
        if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
            if (tokenizer_pre.empty()) {
                LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
                LLAMA_LOG_WARN("%s: \n", __func__);
                LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
                LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__);
                LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__);
                LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
                LLAMA_LOG_WARN("%s: \n", __func__);
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
            } else if (tokenizer_pre == "default") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
            } else if (
                    tokenizer_pre == "llama3"   ||
                    tokenizer_pre == "llama-v3" ||
                    tokenizer_pre == "llama-bpe") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
            } else if (
                    tokenizer_pre == "deepseek-llm") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM;
            } else if (
                    tokenizer_pre == "deepseek-coder") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
            } else if (
                    tokenizer_pre == "falcon") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
            } else if (
                    tokenizer_pre == "mpt") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT;
            } else if (
                    tokenizer_pre == "starcoder") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
            } else if (
                    tokenizer_pre == "gpt-2"      ||
                    tokenizer_pre == "jina-es"    ||
                    tokenizer_pre == "jina-de"    ||
                    tokenizer_pre == "jina-v2-es" ||
                    tokenizer_pre == "jina-v2-de") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
            } else if (
                    tokenizer_pre == "refact") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
            } else if (
                    tokenizer_pre == "command-r") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
            } else if (
                    tokenizer_pre == "qwen2") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
            } else if (
                    tokenizer_pre == "stablelm2") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
            } else if (
                    tokenizer_pre == "olmo") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
            } else if (
                    tokenizer_pre == "dbrx") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
            } else {
                throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
            }
        } else {
            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
        }
    }
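    // the token list is mandatory, while scores and token types are optional:
    // when absent they default to 0.0f and LLAMA_TOKEN_TYPE_NORMAL below
    // (BPE-style vocabs commonly ship without scores)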
    const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
    if (token_idx == -1) {
        throw std::runtime_error("cannot find tokenizer vocab in model file\n");
    }

    const float * scores = nullptr;
    const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
    if (score_idx != -1) {
        scores = (const float *) gguf_get_arr_data(ctx, score_idx);
    }

    const int * toktypes = nullptr;
    const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
    if (toktype_idx != -1) {
        toktypes = (const int *) gguf_get_arr_data(ctx, toktype_idx);
    }

    const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);

    vocab.id_to_token.resize(n_vocab);

    for (uint32_t i = 0; i < n_vocab; i++) {
        std::string word = gguf_get_arr_str(ctx, token_idx, i);
        GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);

        vocab.token_to_id[word] = i;

        auto & token_data = vocab.id_to_token[i];
        token_data.text  = std::move(word);
        token_data.score = scores ? scores[i] : 0.0f;
        token_data.type  = toktypes ? (llama_token_type) toktypes[i] : LLAMA_TOKEN_TYPE_NORMAL;
    }
    GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());

    // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
    if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
        try {
            vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
        } catch (const std::exception & e) {
            LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
            vocab.linefeed_id = vocab.special_pad_id;
        }
    } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
        vocab.linefeed_id = vocab.special_pad_id;
    } else {
        const std::vector<int> ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A
        GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
        vocab.linefeed_id = ids[0];
    }
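    // "\xC4\x8A" is the UTF-8 encoding of U+010A: byte-level BPE remaps the raw
    // newline byte 0x0A to the printable codepoint 0x100 + 0x0A, so tokenizing it
    // recovers the id of the newline token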
    // special tokens
    {
        const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
            { LLM_KV_TOKENIZER_BOS_ID,    vocab.special_bos_id    },
            { LLM_KV_TOKENIZER_EOS_ID,    vocab.special_eos_id    },
            { LLM_KV_TOKENIZER_UNK_ID,    vocab.special_unk_id    },
            { LLM_KV_TOKENIZER_SEP_ID,    vocab.special_sep_id    },
            { LLM_KV_TOKENIZER_PAD_ID,    vocab.special_pad_id    },
            { LLM_KV_TOKENIZER_CLS_ID,    vocab.special_cls_id    },
            { LLM_KV_TOKENIZER_MASK_ID,   vocab.special_mask_id   },
            { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_prefix_id },
            { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_suffix_id },
            { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_middle_id },
            { LLM_KV_TOKENIZER_EOT_ID,    vocab.special_eot_id    },
        };

        for (const auto & it : special_token_types) {
            const std::string & key = kv(std::get<0>(it));
            int32_t & id = std::get<1>(it);

            uint32_t new_id;
            if (!ml.get_key(std::get<0>(it), new_id, false)) {
                continue;
            }
            if (new_id >= vocab.id_to_token.size()) {
                LLAMA_LOG_WARN("%s: bad special token: '%s' = %u, using default id %d\n",
                    __func__, key.c_str(), new_id, id);
            } else {
                id = new_id;
            }
        }

        // Handle add_bos_token and add_eos_token
        {
            bool temp = true;

            if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
                vocab.special_add_bos = int(temp);
            }
            if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
                vocab.special_add_eos = int(temp);
            }
        }
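        // note: special_add_bos/special_add_eos presumably stay at their -1 "unknown"
        // default when these keys are absent, letting downstream code fall back to the
        // architecture's usual behavior (an assumption, not verified in this excerpt)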
        // find EOT token: "<|eot_id|>", "<|im_end|>", "<end_of_turn>", etc.
        //
        // TODO: convert scripts should provide this token through the KV metadata LLAMA_KV_TOKENIZER_EOT_ID
        //       for now, we apply this workaround to find the EOT token based on its text
        if (vocab.special_eot_id == -1) {
            for (const auto & t : vocab.token_to_id) {
                if (
                        // TODO: gemma "<end_of_turn>" is exported as a normal token, so the following check does not work
                        //       need to fix convert script
                        //vocab.id_to_token[t.second].type == LLAMA_TOKEN_TYPE_CONTROL &&
                        (t.first == "<|eot_id|>" ||
                         t.first == "<|im_end|>" ||
                         t.first == "<|end|>" ||
                         t.first == "<end_of_turn>" ||
                         t.first == "<|endoftext|>"
                        )
                   ) {
                    vocab.special_eot_id = t.second;
                    break;
                }
            }
        }
    }
    // build special tokens cache
    {
        // TODO: It is unclear (to me) at this point, whether special tokens are guaranteed to be of a deterministic type,
        //  and will always be correctly labeled in 'added_tokens.json' etc.
        // The assumption is, since special tokens aren't meant to be exposed to end user, they are designed
        //  to be unmatchable by the tokenizer, therefore tokens from the vocab, which are unmatchable by the tokenizer
        //  are special tokens.
        // From testing, this appears to correlate 1:1 with special tokens.
        //
        // Counting special tokens and verifying in only one direction
        //  is sufficient to detect difference in those two sets.
        //
        uint32_t special_tokens_count_by_type = 0;
        uint32_t special_tokens_count_from_verification = 0;

        bool special_tokens_definition_mismatch = false;

        for (const auto & t : vocab.token_to_id) {
            const auto & token = t.first;
            const auto & id    = t.second;

            // Count all non-normal tokens in the vocab while iterating
            if (vocab.id_to_token[id].type != LLAMA_TOKEN_TYPE_NORMAL) {
                special_tokens_count_by_type++;
            }

            // Skip single character tokens
            if (token.length() > 1) {
                bool is_tokenizable = false;

                // Split token string representation in two, in all possible ways
                //  and check if both halves can be matched to a valid token
                for (unsigned i = 1; i < token.length();) {
                    const auto left  = token.substr(0, i);
                    const auto right = token.substr(i);

                    // check if we didn't partition in the middle of a utf sequence
                    auto utf = utf8_len(left.at(left.length() - 1));

                    if (utf == 1) {
                        if (vocab.token_to_id.find(left)  != vocab.token_to_id.end() &&
                            vocab.token_to_id.find(right) != vocab.token_to_id.end()) {
                            is_tokenizable = true;
                            break;
                        }
                        i++;
                    } else {
                        // skip over the rest of multibyte utf sequence
                        i += utf - 1;
                    }
                }

                if (!is_tokenizable) {
                    // Some tokens are multibyte, but they are utf sequences with equivalent text length of 1
                    //  it's faster to re-filter them here, since there are way less candidates now

                    // Calculate a total "utf" length of a token string representation
                    size_t utf8_str_len = 0;
                    for (unsigned i = 0; i < token.length();) {
                        utf8_str_len++;
                        i += utf8_len(token.at(i));
                    }

                    // And skip the ones which are one character
                    if (utf8_str_len > 1) {
                        // At this point what we have left are special tokens only
                        vocab.special_tokens_cache[token] = id;

                        // Count manually found special tokens
                        special_tokens_count_from_verification++;

                        // If this manually found special token is not marked as such, flag a mismatch
                        if (vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL) {
                            special_tokens_definition_mismatch = true;
                        }
                    }
                }
            }
        }

        if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
            LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
                __func__,
                special_tokens_count_from_verification, vocab.id_to_token.size(),
                special_tokens_count_by_type, vocab.id_to_token.size()
            );
        } else {
            LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
                __func__,
                special_tokens_count_from_verification, vocab.id_to_token.size()
            );
        }
    }
}
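// note: the special_tokens_cache built above is what lets the tokenizer split raw
// input around special-token text before running the regular SPM/BPE/WPM pass on
// the fragments in between (a sketch of the intended use, not shown in this excerpt)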
static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
    const auto & hparams = model.hparams;
    const auto & vocab   = model.vocab;

    const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);

    // hparams
    LLAMA_LOG_INFO("%s: format           = %s\n",   __func__, llama_file_version_name(ml.fver));
    LLAMA_LOG_INFO("%s: arch             = %s\n",   __func__, LLM_ARCH_NAMES.at(model.arch));
    LLAMA_LOG_INFO("%s: vocab type       = %s\n",   __func__, llama_model_vocab_type_name(vocab.type));
    LLAMA_LOG_INFO("%s: n_vocab          = %u\n",   __func__, hparams.n_vocab);
    LLAMA_LOG_INFO("%s: n_merges         = %u\n",   __func__, (int) vocab.bpe_ranks.size());
    LLAMA_LOG_INFO("%s: n_ctx_train      = %u\n",   __func__, hparams.n_ctx_train);
    LLAMA_LOG_INFO("%s: n_embd           = %u\n",   __func__, hparams.n_embd);
    LLAMA_LOG_INFO("%s: n_head           = %u\n",   __func__, hparams.n_head);
    LLAMA_LOG_INFO("%s: n_head_kv        = %u\n",   __func__, hparams.n_head_kv);
    LLAMA_LOG_INFO("%s: n_layer          = %u\n",   __func__, hparams.n_layer);
    LLAMA_LOG_INFO("%s: n_rot            = %u\n",   __func__, hparams.n_rot);
    LLAMA_LOG_INFO("%s: n_embd_head_k    = %u\n",   __func__, hparams.n_embd_head_k);
    LLAMA_LOG_INFO("%s: n_embd_head_v    = %u\n",   __func__, hparams.n_embd_head_v);
    LLAMA_LOG_INFO("%s: n_gqa            = %u\n",   __func__, hparams.n_gqa());
    LLAMA_LOG_INFO("%s: n_embd_k_gqa     = %u\n",   __func__, hparams.n_embd_k_gqa());
    LLAMA_LOG_INFO("%s: n_embd_v_gqa     = %u\n",   __func__, hparams.n_embd_v_gqa());
    LLAMA_LOG_INFO("%s: f_norm_eps       = %.1e\n", __func__, hparams.f_norm_eps);
    LLAMA_LOG_INFO("%s: f_norm_rms_eps   = %.1e\n", __func__, hparams.f_norm_rms_eps);
    LLAMA_LOG_INFO("%s: f_clamp_kqv      = %.1e\n", __func__, hparams.f_clamp_kqv);
    LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
    LLAMA_LOG_INFO("%s: f_logit_scale    = %.1e\n", __func__, hparams.f_logit_scale);
    LLAMA_LOG_INFO("%s: n_ff             = %u\n",   __func__, hparams.n_ff);
    LLAMA_LOG_INFO("%s: n_expert         = %u\n",   __func__, hparams.n_expert);
    LLAMA_LOG_INFO("%s: n_expert_used    = %u\n",   __func__, hparams.n_expert_used);
    LLAMA_LOG_INFO("%s: causal attn      = %d\n",   __func__, hparams.causal_attn);
    LLAMA_LOG_INFO("%s: pooling type     = %d\n",   __func__, hparams.pooling_type);
    LLAMA_LOG_INFO("%s: rope type        = %d\n",   __func__, hparams.rope_type);
    LLAMA_LOG_INFO("%s: rope scaling     = %s\n",   __func__, rope_scaling_type);
    LLAMA_LOG_INFO("%s: freq_base_train  = %.1f\n", __func__, hparams.rope_freq_base_train);
    LLAMA_LOG_INFO("%s: freq_scale_train = %g\n",   __func__, hparams.rope_freq_scale_train);
    LLAMA_LOG_INFO("%s: n_yarn_orig_ctx  = %u\n",   __func__, hparams.n_yarn_orig_ctx);
    LLAMA_LOG_INFO("%s: rope_finetuned   = %s\n",   __func__, hparams.rope_finetuned ? "yes" : "unknown");
    LLAMA_LOG_INFO("%s: ssm_d_conv       = %u\n",   __func__, hparams.ssm_d_conv);
    LLAMA_LOG_INFO("%s: ssm_d_inner      = %u\n",   __func__, hparams.ssm_d_inner);
    LLAMA_LOG_INFO("%s: ssm_d_state      = %u\n",   __func__, hparams.ssm_d_state);
    LLAMA_LOG_INFO("%s: ssm_dt_rank      = %u\n",   __func__, hparams.ssm_dt_rank);
    LLAMA_LOG_INFO("%s: model type       = %s\n",   __func__, llama_model_type_name(model.type));
    LLAMA_LOG_INFO("%s: model ftype      = %s\n",   __func__, llama_model_ftype_name(model.ftype).c_str());
    if (ml.n_elements >= 1e12) {
        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, ml.n_elements*1e-12);
    } else if (ml.n_elements >= 1e9) {
        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, ml.n_elements*1e-9);
    } else if (ml.n_elements >= 1e6) {
        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, ml.n_elements*1e-6);
    } else {
        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, ml.n_elements*1e-3);
    }
    if (ml.n_bytes < GiB) {
        LLAMA_LOG_INFO("%s: model size       = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0,        ml.n_bytes*8.0/ml.n_elements);
    } else {
        LLAMA_LOG_INFO("%s: model size       = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
    }
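    // BPW = bits per weight: total tensor bytes * 8 / total element count,
    // i.e. the effective bit width of the quantization actually stored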
    // general kv
    LLAMA_LOG_INFO("%s: general.name     = %s\n", __func__, model.name.c_str());

    // special tokens
    if (vocab.special_bos_id    != -1) { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,    vocab.id_to_token[vocab.special_bos_id].text.c_str() );    }
    if (vocab.special_eos_id    != -1) { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,    vocab.id_to_token[vocab.special_eos_id].text.c_str() );    }
    if (vocab.special_unk_id    != -1) { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,    vocab.id_to_token[vocab.special_unk_id].text.c_str() );    }
    if (vocab.special_sep_id    != -1) { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,    vocab.id_to_token[vocab.special_sep_id].text.c_str() );    }
    if (vocab.special_pad_id    != -1) { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,    vocab.id_to_token[vocab.special_pad_id].text.c_str() );    }
    if (vocab.special_cls_id    != -1) { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,    vocab.id_to_token[vocab.special_cls_id].text.c_str() );    }
    if (vocab.special_mask_id   != -1) { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,   vocab.id_to_token[vocab.special_mask_id].text.c_str() );   }
    if (vocab.linefeed_id       != -1) { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,       vocab.id_to_token[vocab.linefeed_id].text.c_str() );       }
    if (vocab.special_prefix_id != -1) { LLAMA_LOG_INFO( "%s: PRE token        = %d '%s'\n", __func__, vocab.special_prefix_id, vocab.id_to_token[vocab.special_prefix_id].text.c_str() ); }
    if (vocab.special_suffix_id != -1) { LLAMA_LOG_INFO( "%s: SUF token        = %d '%s'\n", __func__, vocab.special_suffix_id, vocab.id_to_token[vocab.special_suffix_id].text.c_str() ); }
    if (vocab.special_middle_id != -1) { LLAMA_LOG_INFO( "%s: MID token        = %d '%s'\n", __func__, vocab.special_middle_id, vocab.id_to_token[vocab.special_middle_id].text.c_str() ); }
    if (vocab.special_eot_id    != -1) { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,    vocab.id_to_token[vocab.special_eot_id].text.c_str() );    }
}
// Returns false if cancelled by progress_callback
static bool llm_load_tensors(
        llama_model_loader & ml,
        llama_model & model,
        int n_gpu_layers,
        enum llama_split_mode split_mode,
        int main_gpu,
        const float * tensor_split,
        bool use_mlock,
        llama_progress_callback progress_callback,
        void * progress_callback_user_data) {
    model.t_start_us = ggml_time_us();

    auto & hparams = model.hparams;

#ifdef GGML_USE_SYCL
    // disable MoE with SYCL until mul_mat_id is updated
    if (hparams.n_expert > 0) {
        n_gpu_layers = 0;
    }
#endif

    model.split_mode   = split_mode;
    model.main_gpu     = main_gpu;
    model.n_gpu_layers = n_gpu_layers;

    const int64_t n_layer     = hparams.n_layer;
    const int64_t i_gpu_start = std::max((int64_t) hparams.n_layer - n_gpu_layers, (int64_t) 0);
    bool use_mmap_buffer = true;

    // there is very little benefit to offloading the input layer, so always keep it on the CPU
    model.buft_input = llama_default_buffer_type_cpu(true);
    //model.buft_input = llama_default_buffer_type_offload(main_gpu);

    model.buft_layer.resize(n_layer);

    // assign cpu layers
    for (int64_t i = 0; i < i_gpu_start; ++i) {
        model.buft_layer[i] = llama_default_buffer_type_cpu(true);
    }

    if (split_mode == LLAMA_SPLIT_MODE_LAYER) {
        // calculate the split points
        int device_count = llama_get_device_count(model);
        bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; });
        std::vector<float> splits(device_count);
        if (all_zero) {
            // default split, by free memory
            for (int i = 0; i < device_count; ++i) {
                splits[i] = llama_get_device_memory(model, i);
            }
        } else {
            std::copy(tensor_split, tensor_split + device_count, splits.begin());
        }

        // sum and normalize the splits to get the split points
        float split_sum = 0.0f;
        for (int i = 0; i < device_count; ++i) {
            split_sum += splits[i];
            splits[i] = split_sum;
        }
        for (int i = 0; i < device_count; ++i) {
            splits[i] /= split_sum;
        }
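        // e.g. with per-device weights {8, 8, 16} the running sums become {8, 16, 32}
        // and the normalized split points {0.25, 0.50, 1.00}; a layer whose fraction
        // falls in [0, 0.25) goes to device 0, [0.25, 0.50) to device 1, and so on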
        // assign the repeating layers to the devices according to the splits
        int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1);
        for (int64_t i = i_gpu_start; i < n_layer; ++i) {
            int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits.begin();
            model.buft_layer[i] = llama_default_buffer_type_offload(model, layer_gpu);
        }
        // assign the output layer
        if (n_gpu_layers > n_layer) {
            int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits.begin();
            model.buft_output = llama_default_buffer_type_offload(model, layer_gpu);
        } else {
            model.buft_output = llama_default_buffer_type_cpu(true);
        }
    } else {
        ggml_backend_buffer_type_t split_buft;
        if (split_mode == LLAMA_SPLIT_MODE_ROW) {
            split_buft = llama_default_buffer_type_split(model, main_gpu, tensor_split);
        } else {
            // LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_LAYER in backends where it is not supported
            split_buft = llama_default_buffer_type_offload(model, main_gpu);
        }
        // assign the repeating layers
        for (int64_t i = i_gpu_start; i < n_layer; ++i) {
            model.buft_layer[i] = {
                split_buft,
                llama_default_buffer_type_offload(model, main_gpu)
            };
        }
        // assign the output layer
        if (n_gpu_layers > n_layer) {
            model.buft_output = {
                split_buft,
                llama_default_buffer_type_offload(model, main_gpu)
            };
        } else {
            model.buft_output = llama_default_buffer_type_cpu(true);
        }
    }
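    // each assignment above carries two buffer types: buft_matrix for the large
    // matrix weights (row-split across devices in LLAMA_SPLIT_MODE_ROW) and buft
    // for everything else, which stays on the main GPU in that mode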
    // count used buffer types
    std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
    buft_layer_count[model.buft_input.buft]++;
    buft_layer_count[model.buft_input.buft_matrix]++;
    buft_layer_count[model.buft_output.buft]++;
    buft_layer_count[model.buft_output.buft_matrix]++;
    for (int64_t i = 0; i < n_layer; ++i) {
        buft_layer_count[model.buft_layer[i].buft]++;
        buft_layer_count[model.buft_layer[i].buft_matrix]++;
    }

    // create one context per buffer type
    size_t ctx_size = ggml_tensor_overhead()*(ml.n_tensors + 1); // +1 for models where tok_embd is duplicated as output

    // for moe merged tensors
    ctx_size += ggml_tensor_overhead()*n_layer*3;

    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
    for (auto & it : buft_layer_count) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx_size,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ggml_context * ctx = ggml_init(params);
        if (!ctx) {
            throw std::runtime_error(format("failed to create context"));
        }
        ctx_map[it.first] = ctx;
        model.ctxs.push_back(ctx);
    }
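    // with no_alloc = true these contexts hold only tensor metadata; the backing
    // buffers are allocated (or mmap'd) per buffer type later in the load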
    LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);

    // create tensors for the weights
    {
        const int64_t n_embd       = hparams.n_embd;
        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
        const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
        const int64_t n_embd_gqa   = n_embd_v_gqa;
        const int64_t n_vocab      = hparams.n_vocab;
        const int64_t n_vocab_type = hparams.n_vocab_type;
        const int64_t n_ff         = hparams.n_ff;
        const int64_t n_expert     = hparams.n_expert;

        if (n_expert > 0 && hparams.n_expert_used == 0) {
            throw std::runtime_error("model has expert layers but no expert layers are used");
        }

        GGML_ASSERT(n_embd_gqa == n_embd_k_gqa);

        ggml_context * ctx_input        = ctx_map.at(model.buft_input.buft);
        ggml_context * ctx_output       = ctx_map.at(model.buft_output.buft);
        ggml_context * ctx_output_split = ctx_map.at(model.buft_output.buft_matrix);

        auto ctx_for_layer       = [&](int i) { return ctx_map.at(model.buft_layer[i].buft); };
        auto ctx_for_layer_split = [&](int i) { return ctx_map.at(model.buft_layer[i].buft_matrix); };

        model.layers.resize(n_layer);

        const auto tn = LLM_TN(model.arch);
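        // convention for the per-arch cases below: a trailing `false` argument to
        // ml.create_tensor() marks the tensor as optional (missing tensors yield NULL
        // instead of throwing); several arches use this to fall back to tok_embd
        // when a dedicated output matrix is absent, i.e. tied embeddings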
        switch (model.arch) {
            case LLM_ARCH_LLAMA:
            case LLM_ARCH_REFACT:
            case LLM_ARCH_MINICPM:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        if (model.arch != LLM_ARCH_MINICPM) {
                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                            // if output is NULL, init from the input tok embed
                            if (model.output == NULL) {
                                model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                                ml.n_created--; // artificial tensor
                                ml.size_data += ggml_nbytes(model.output);
                            }
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        // optional bias tensors
                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     false);
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, false);
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, false);
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     false);

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        if (n_expert == 0) {
                            layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff});
                            layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd});
                            layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff});
                        } else {
                            layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});

                            layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, false);
                            if (layer.ffn_gate_exps) {
                                layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert});
                                layer.ffn_up_exps   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert});
                            } else {
                                // merge split expert into a single tensor for compatibility with older models
                                // requires disabling mmap
                                use_mmap_buffer = false;

                                ggml_type type_gate = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, 0).c_str())->type;
                                ggml_type type_down = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, 0).c_str())->type;
                                ggml_type type_up   = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_UP_EXP,   "weight", i, 0).c_str())->type;

                                layer.ffn_gate_exps = ggml_new_tensor_3d(ctx_split, type_gate, n_embd,   n_ff, n_expert);
                                layer.ffn_down_exps = ggml_new_tensor_3d(ctx_split, type_down,   n_ff, n_embd, n_expert);
                                layer.ffn_up_exps   = ggml_new_tensor_3d(ctx_split, type_up,   n_embd,   n_ff, n_expert);

                                ggml_set_name(layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i).c_str());
                                ggml_set_name(layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i).c_str());
                                ggml_set_name(layer.ffn_up_exps,   tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i).c_str());

                                for (uint32_t x = 0; x < n_expert; ++x) {
                                    // the individual experts are loaded into a view of the merged tensor
                                    ml.create_tensor_as_view(ctx_split, layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_gate_exps->nb[2]*x);
                                    ml.create_tensor_as_view(ctx_split, layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd }, layer.ffn_down_exps->nb[2]*x);
                                    ml.create_tensor_as_view(ctx_split, layer.ffn_up_exps,   tn(LLM_TENSOR_FFN_UP_EXP,   "weight", i, x), { n_embd, n_ff }, layer.ffn_up_exps->nb[2]*x);
                                }
                            }
                        }
                    }
                } break;
            case LLM_ARCH_GROK:
                {
                    if (n_expert == 0) {
                        throw std::runtime_error("Grok model cannot have zero experts");
                    }

                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output      = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                        // if output is NULL, init from the input tok embed
                        if (model.output == NULL) {
                            model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                            ml.n_created--; // artificial tensor
                            ml.size_data += ggml_nbytes(model.output);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate_inp  = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert});
                        layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, false);
                        if (layer.ffn_gate_exps) {
                            layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert});
                            layer.ffn_up_exps   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert});
                        } else {
                            // merge split expert into a single tensor for compatibility with older models
                            // requires disabling mmap
                            use_mmap_buffer = false;

                            ggml_type type_gate = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, 0).c_str())->type;
                            ggml_type type_down = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, 0).c_str())->type;
                            ggml_type type_up   = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_UP_EXP,   "weight", i, 0).c_str())->type;

                            layer.ffn_gate_exps = ggml_new_tensor_3d(ctx_split, type_gate, n_embd,   n_ff, n_expert);
                            layer.ffn_down_exps = ggml_new_tensor_3d(ctx_split, type_down,   n_ff, n_embd, n_expert);
                            layer.ffn_up_exps   = ggml_new_tensor_3d(ctx_split, type_up,   n_embd,   n_ff, n_expert);

                            ggml_set_name(layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i).c_str());
                            ggml_set_name(layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i).c_str());
                            ggml_set_name(layer.ffn_up_exps,   tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i).c_str());

                            for (uint32_t x = 0; x < n_expert; ++x) {
                                // the individual experts are loaded into a view of the merged tensor
                                ml.create_tensor_as_view(ctx_split, layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_gate_exps->nb[2]*x);
                                ml.create_tensor_as_view(ctx_split, layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd }, layer.ffn_down_exps->nb[2]*x);
                                ml.create_tensor_as_view(ctx_split, layer.ffn_up_exps,   tn(LLM_TENSOR_FFN_UP_EXP,   "weight", i, x), { n_embd, n_ff }, layer.ffn_up_exps->nb[2]*x);
                            }
                        }

                        layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
                    }
                } break;
            case LLM_ARCH_DBRX:
                {
                    if (n_expert == 0) {
                        throw std::runtime_error("DBRX model cannot have zero experts");
                    }

                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output      = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.wo   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});

                        layer.ffn_gate_inp  = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert});
                        layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert});
                        layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert});
                        layer.ffn_up_exps   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert});
                    }
                } break;
            case LLM_ARCH_BAICHUAN:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output      = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd});
                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff});
                    }
                } break;
            case LLM_ARCH_FALCON:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd});

                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                        if (!model.output) {
                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // needs to be on GPU
                            ml.n_created--; // artificial tensor
                            ml.size_data += ggml_nbytes(model.output);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd});

                        layer.attn_norm_2   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, false);
                        layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, false);

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.wo   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd});
                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff});
                    }
                } break;
            case LLM_ARCH_STARCODER:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, hparams.n_ctx_train});

                    // output
                    {
                        model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd});
                        model.output        = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                        if (!model.output) {
                            // needs to be on GPU
                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                            ml.n_created--; // artificial tensor
                            ml.size_data += ggml_nbytes(model.output);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd});

                        layer.ffn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd});

                        layer.ffn_down   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd});

                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i),   {n_ff});
                    }
                } break;
            case LLM_ARCH_BERT:
            case LLM_ARCH_NOMIC_BERT:
                {
                    model.tok_embd  = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab});
                    model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type});
                    if (model.arch == LLM_ARCH_BERT) {
                        model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
                    }

                    model.tok_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
                    model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd});

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        if (model.arch == LLM_ARCH_BERT) {
                            layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                            layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd});

                            layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                            layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i),   {n_embd_gqa});

                            layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                            layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i),   {n_embd_gqa});
                        } else {
                            layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        }

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.attn_out_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
                        layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i),   {n_embd});

                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});

                        if (model.arch == LLM_ARCH_BERT) {
                            layer.bo         = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
                            layer.ffn_up_b   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff});
                            layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
                        } else {
                            layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        }

                        layer.layer_out_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
                        layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i),   {n_embd});
                    }
                } break;
            case LLM_ARCH_JINA_BERT_V2:
                {
                    model.tok_embd  = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab});      // word_embeddings
                    model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type}); // token_type_embeddings
                    model.tok_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}); // LayerNorm
                    model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}); // LayerNorm bias

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i]; // JinaBertLayer

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd});

                        layer.attn_q_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, false);
                        layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i),   {n_embd}, false);

                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i),   {n_embd_gqa});

                        layer.attn_k_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, false);
                        layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i),   {n_embd}, false);

                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i),   {n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); // output_dense
                        layer.bo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd});         // output_dense

                        layer.attn_out_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}); // output_norm
                        layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i),   {n_embd});

                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});

                        layer.ffn_down   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd});

                        layer.layer_out_norm   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
                        layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i),   {n_embd});
                    }
                } break;
            case LLM_ARCH_BLOOM:
                {
                    model.tok_embd   = ml.create_tensor(ctx_input,  tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab});
                    model.tok_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
                    model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd});

                    // output
                    {
                        model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd});
                        model.output        = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd});

                        layer.ffn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd});

                        layer.ffn_down   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd});

                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i),   {n_ff});
                    }
                } break;
            case LLM_ARCH_MPT:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, hparams.n_ctx_train}, false);

                    // output
                    {
                        model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, false);

                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                        if (!model.output) {
                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // needs to be on GPU
                            ml.n_created--; // artificial tensor
                            ml.size_data += ggml_nbytes(model.output);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, false);

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, false);

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, false);

                        layer.ffn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, false);

                        layer.ffn_down   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, false);

                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i),   {n_ff}, false);

                        layer.attn_q_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, false);
                        layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i),   {n_embd}, false);

                        layer.attn_k_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, false);
                        layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i),   {n_embd}, false);

                        // AWQ ScaleActivation layer
                        layer.ffn_act = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, false);
                    }
                } break;
  4672. case LLM_ARCH_STABLELM:
  4673. {
  4674. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4675. // output
  4676. {
  4677. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  4678. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4679. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  4680. }
  4681. for (int i = 0; i < n_layer; ++i) {
  4682. ggml_context * ctx_layer = ctx_for_layer(i);
  4683. ggml_context * ctx_split = ctx_for_layer_split(i);
  4684. auto & layer = model.layers[i];
  4685. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4686. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  4687. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  4688. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  4689. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  4690. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4691. // optional bias tensors, present in Stable LM 2 1.6B
  4692. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, false);
  4693. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, false);
  4694. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, false);
  4695. // optional q and k layernorms, present in StableLM 2 12B
  4696. layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head}, false);
  4697. layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head_kv}, false);
  4698. // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
  4699. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, false);
  4700. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, false);
  4701. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  4702. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  4703. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4704. }
  4705. } break;
            case LLM_ARCH_QWEN:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
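                        // note: Qwen's feed_forward_length in the GGUF appears to count the
                        // concatenated gate+up width, so each projection below uses n_ff/2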
                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2});
                    }
                } break;
            case LLM_ARCH_QWEN2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                        // if output is NULL, init from the input tok embed
                        if (model.output == NULL) {
                            model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                            ml.n_created--; // artificial tensor
                            ml.size_data += ggml_nbytes(model.output);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        // optional bias tensors
                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_QWEN2MOE:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        // optional bias tensors
                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

                        layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});

                        GGML_ASSERT(hparams.n_expert > 0);
                        GGML_ASSERT(hparams.n_expert_used > 0);

                        // MoE branch
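                        // the per-expert FFN width is derived by splitting n_ff evenly across
                        // the experts used per token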
                        auto n_ff_exp = n_ff / hparams.n_expert_used;
                        layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
                        layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert});
                        layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});

                        // Shared expert branch
                        layer.ffn_gate_inp_shexp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd});
                        layer.ffn_gate_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff});
                        layer.ffn_down_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff, n_embd});
                        layer.ffn_up_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_PHI2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                        model.output_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, false);
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, false);
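                        // support both tensor layouts: a fused QKV tensor or separate Q/K/V
                        // tensors - fall back to the split form when no fused tensor is present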
                        if (layer.wqkv == nullptr) {
                            layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                            layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});

                            layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                            layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});

                            layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                            layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
                        }

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                    }
                } break;
            case LLM_ARCH_PHI3:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab });

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd });
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab });
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd });

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, false);
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd });

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd });
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd });
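                        // the up projection is twice as wide because Phi-3 fuses the gate and
                        // up projections into a single tensor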
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff });
                    }
                } break;
            case LLM_ARCH_PLAMO:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_GPT2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                    }
                } break;
            case LLM_ARCH_CODESHELL:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});

                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});

                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
                    }
                } break;
            case LLM_ARCH_ORION:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_INTERNLM2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        // layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_GEMMA:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                    model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // same as tok_embd, duplicated to allow offloading
                    ml.n_created--; // artificial tensor
                    ml.size_data += ggml_nbytes(model.output);

                    const int64_t n_ff = hparams.n_ff;
                    const int64_t n_embd_head_k = hparams.n_embd_head_k;
                    const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
                    const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();

                    for (uint32_t i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * hparams.n_head});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * hparams.n_head, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                    }
                } break;
            case LLM_ARCH_STARCODER2:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});

                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                        // if output is NULL, init from the input tok embed
                        if (model.output == NULL) {
                            model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                            ml.n_created--; // artificial tensor
                            ml.size_data += ggml_nbytes(model.output);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        // optional bias tensors
                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});

                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});

                        // optional bias tensors
                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
                        layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP , "bias", i), { n_ff});
                    }
                } break;
            case LLM_ARCH_MAMBA:
                {
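                    // Mamba SSM hyperparameters: d_conv is the depthwise conv kernel width,
                    // d_inner the expanded inner dimension, d_state the SSM state size, and
                    // dt_rank the rank of the delta (time step) projection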
                    const int64_t d_conv = hparams.ssm_d_conv;
                    const int64_t d_inner = hparams.ssm_d_inner;
                    const int64_t d_state = hparams.ssm_d_state;
                    const int64_t dt_rank = hparams.ssm_dt_rank;
                    // only an expansion factor of 2 is supported for now
                    GGML_ASSERT(2 * n_embd == d_inner);

                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});

                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
                        if (model.output == NULL) {
                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                            ml.n_created--; // artificial tensor
                            ml.size_data += ggml_nbytes(model.output);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        // norm
                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.ssm_in = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner});

                        layer.ssm_conv1d = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner});
                        layer.ssm_conv1d_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner});

                        layer.ssm_x = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state});

                        layer.ssm_dt = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner});
                        layer.ssm_dt_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner});

                        // no "weight" suffix for these
                        layer.ssm_a = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner});
                        layer.ssm_d = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_D, i), {d_inner});

                        // out_proj
                        layer.ssm_out = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd});
                    }
                } break;
            case LLM_ARCH_XVERSE:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_COMMAND_R:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                        // init output from the input tok embed
                        model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                        ml.n_created--; // artificial tensor
                        ml.size_data += ggml_nbytes(model.output);
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_layer = ctx_for_layer(i);
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
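                        // assumption: only the larger Command R variants (64+ layers) carry
                        // per-head Q/K norms, hence the layer-count check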
                        if (n_layer >= 64) {
                            layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head});
                            layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head_kv});
                        }

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            case LLM_ARCH_OLMO: // adapted from LLM_ARCH_LLAMA with norm params removed
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

                    // output
                    {
                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
                        // if output is NULL, init from the input tok embed
                        if (model.output == NULL) {
                            model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
                            ml.n_created--; // artificial tensor
                            ml.size_data += ggml_nbytes(model.output);
                        }
                    }

                    for (int i = 0; i < n_layer; ++i) {
                        ggml_context * ctx_split = ctx_for_layer_split(i);

                        auto & layer = model.layers[i];

                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                        layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
                    }
                } break;
            default:
                throw std::runtime_error("unknown architecture");
        }
    }
    ml.done_getting_tensors();

    ml.init_mappings(true, use_mlock ? &model.mlock_mmaps : nullptr);
    model.mappings.reserve(ml.mappings.size());

    // create the backend buffers
    std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_bufs;
    ctx_bufs.reserve(ctx_map.size());

    // Ensure we have enough capacity for the maximum backend buffer we will potentially create
    size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
    model.bufs.reserve(n_max_backend_buffer);

    for (auto & it : ctx_map) {
        ggml_backend_buffer_type_t buft = it.first;
        ggml_context * ctx = it.second;

        llama_buf_map bufs;
        bufs.reserve(n_max_backend_buffer);

        // only the mmap region containing the tensors in the model is mapped to the backend buffer
        // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
        // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
        if (ml.use_mmap && use_mmap_buffer && buft == llama_default_buffer_type_cpu(true)) {
            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
                void * addr = nullptr;
                size_t first, last;
                ml.get_mapping_range(&first, &last, &addr, idx, ctx);
                if (first >= last) {
                    continue;
                }
                ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr((char *) addr + first, last - first);
                if (buf == nullptr) {
                    throw std::runtime_error("unable to allocate backend CPU buffer");
                }
                model.bufs.push_back(buf);
                bufs.emplace(idx, buf);
#ifdef GGML_USE_CUDA
                if (n_layer >= n_gpu_layers) {
                    ggml_backend_cuda_register_host_buffer(
                            ggml_backend_buffer_get_base(buf),
                            ggml_backend_buffer_get_size(buf));
                }
#endif
            }
        }
#ifdef GGML_USE_METAL
        else if (ml.use_mmap && use_mmap_buffer && buft == ggml_backend_metal_buffer_type()) {
            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
                const size_t max_size = ggml_get_max_tensor_size(ctx);
                void * addr = nullptr;
                size_t first, last;
                ml.get_mapping_range(&first, &last, &addr, idx, ctx);
                if (first >= last) {
                    continue;
                }
                ggml_backend_buffer_t buf = ggml_backend_metal_buffer_from_ptr((char *) addr + first, last - first, max_size);
                if (buf == nullptr) {
                    throw std::runtime_error("unable to allocate backend metal buffer");
                }
                model.bufs.push_back(buf);
                bufs.emplace(idx, buf);
            }
        }
#endif
        else {
            ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
            if (buf == nullptr) {
                throw std::runtime_error("unable to allocate backend buffer");
            }
            model.bufs.push_back(buf);
            if (use_mlock && ggml_backend_buffer_is_host(buf)) {
                model.mlock_bufs.emplace_back(new llama_mlock);
                auto & mlock_buf = model.mlock_bufs.back();
                mlock_buf->init (ggml_backend_buffer_get_base(buf));
                mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
            }
            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
                bufs.emplace(idx, buf);
            }
        }

        if (bufs.empty()) {
            throw std::runtime_error("failed to allocate buffer");
        }

        for (auto & buf : bufs) {
            // indicate that this buffer contains weights
            // this is used by ggml_backend_sched to improve op scheduling -> ops that use a weight are preferably scheduled to the backend that contains the weight
            ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
        }

        ctx_bufs.emplace_back(ctx, bufs);
    }

    if (llama_supports_gpu_offload()) {
        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));

        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
        if (n_gpu_layers > (int) hparams.n_layer) {
            LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
        }

        const int max_backend_supported_layers = hparams.n_layer + 1;
        const int max_offloadable_layers = hparams.n_layer + 1;

        LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
    }
    // print memory requirements
    for (ggml_backend_buffer_t buf : model.bufs) {
        LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
    }

    // populate tensors_by_name
    for (ggml_context * ctx : model.ctxs) {
        for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
            model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
        }
    }

    // load tensor data
    for (auto & it : ctx_bufs) {
        ggml_context * ctx = it.first;
        auto & bufs = it.second;
        if (!ml.load_all_data(ctx, bufs, use_mlock ? &model.mlock_mmaps : NULL, progress_callback, progress_callback_user_data)) {
            return false;
        }
    }

    if (use_mmap_buffer) {
        for (auto & mapping : ml.mappings) {
            model.mappings.emplace_back(std::move(mapping));
        }
    }
    // the loading time will be recalculated after the first eval, so
    // we take page faults deferred by mmap() into consideration
    model.t_load_us = ggml_time_us() - model.t_start_us;

    return true;
}
// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
    try {
        llama_model_loader ml(fname, params.use_mmap, params.check_tensors, params.kv_overrides);

        model.hparams.vocab_only = params.vocab_only;

        try {
            llm_load_arch(ml, model);
        } catch(const std::exception & e) {
            throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
        }
        try {
            llm_load_hparams(ml, model);
        } catch(const std::exception & e) {
            throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
        }
        try {
            llm_load_vocab(ml, model);
        } catch(const std::exception & e) {
            throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
        }

        llm_load_print_meta(ml, model);

        if (model.vocab.type != LLAMA_VOCAB_TYPE_NONE &&
            model.hparams.n_vocab != model.vocab.id_to_token.size()) {
            throw std::runtime_error("vocab size mismatch");
        }

        if (params.vocab_only) {
            LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
            return 0;
        }

#ifdef GGML_USE_KOMPUTE
        if (params.n_gpu_layers > 0 && (
            !(model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON)
            || !(
                model.ftype == LLAMA_FTYPE_ALL_F32 ||
                model.ftype == LLAMA_FTYPE_MOSTLY_F16 ||
                model.ftype == LLAMA_FTYPE_MOSTLY_BF16 ||
                model.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
                model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1
            )
        )) {
            // TODO(cebtenzzre): propagate this error outside of llama_load_model_from_file
            LLAMA_LOG_WARN("%s: disabling Kompute due to unsupported model arch or quantization\n", __func__);
            params.n_gpu_layers = 0;
        }
#endif

#ifdef GGML_USE_SYCL
        if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
            ggml_backend_sycl_set_single_device_mode(params.main_gpu);
            // SYCL uses device indices (0, 1, 2) directly; convert the user-provided device id to a device index
            params.main_gpu = ggml_backend_sycl_get_device_index(params.main_gpu);
        } else {
            ggml_backend_sycl_set_mul_device_mode();
        }
#endif

        if (!llm_load_tensors(
            ml, model, params.n_gpu_layers, params.split_mode, params.main_gpu, params.tensor_split, params.use_mlock,
            params.progress_callback, params.progress_callback_user_data
        )) {
            return -2;
        }
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
        return -1;
    }

    return 0;
}
//
// llm_build
//

using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;

enum llm_ffn_op_type {
    LLM_FFN_SILU,
    LLM_FFN_GELU,
    LLM_FFN_RELU,
    LLM_FFN_RELU_SQR,
};

enum llm_ffn_gate_type {
    LLM_FFN_SEQ,
    LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
};

enum llm_norm_type {
    LLM_NORM,
    LLM_NORM_RMS,
};
static struct ggml_tensor * llm_build_inp_embd(
        struct ggml_context * ctx,
        struct llama_context & lctx,
        const llama_hparams & hparams,
        const llama_batch & batch,
        struct ggml_tensor * tok_embd,
        const llm_build_cb & cb) {
    const int64_t n_embd = hparams.n_embd;

    struct ggml_tensor * inpL;
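    // the graph takes either token ids (rows looked up in tok_embd) or
    // pre-computed embeddings passed in directly by the caller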
    if (batch.token) {
        lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
        cb(lctx.inp_tokens, "inp_tokens", -1);
        ggml_set_input(lctx.inp_tokens);

        inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens);
    } else {
        lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
        inpL = lctx.inp_embd;
        ggml_set_input(lctx.inp_embd);
    }

    cb(inpL, "inp_embd", -1);

    return inpL;
}
static void llm_build_kv_store(
        struct ggml_context * ctx,
        const llama_hparams & hparams,
        const llama_cparams & cparams,
        const llama_kv_cache & kv,
        struct ggml_cgraph * graph,
        struct ggml_tensor * k_cur,
        struct ggml_tensor * v_cur,
        int32_t n_tokens,
        int32_t kv_head,
        const llm_build_cb & cb,
        int64_t il) {
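    // append the current K and V projections for layer il to the KV cache,
    // starting at slot kv_head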
    const int64_t n_ctx = cparams.n_ctx;

    const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
    const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();

    GGML_ASSERT(kv.size == n_ctx);

    struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa,
            (ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa))*kv_head);
    cb(k_cache_view, "k_cache_view", il);

    // note: storing RoPE-ed version of K in the KV cache
    ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));

    assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens);

    struct ggml_tensor * v_cache_view = nullptr;

    if (cparams.flash_attn) {
        v_cache_view = ggml_view_1d(ctx, kv.v_l[il], n_tokens*n_embd_v_gqa,
                (kv_head)*ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa));
    } else {
        // note: the V cache is transposed when not using flash attention
        v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa,
                ( n_ctx)*ggml_element_size(kv.v_l[il]),
                (kv_head)*ggml_element_size(kv.v_l[il]));

        v_cur = ggml_transpose(ctx, v_cur);
    }
    cb(v_cache_view, "v_cache_view", il);

    ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur, v_cache_view));
}
static struct ggml_tensor * llm_build_norm(
        struct ggml_context * ctx,
        struct ggml_tensor * cur,
        const llama_hparams & hparams,
        struct ggml_tensor * mw,
        struct ggml_tensor * mb,
        llm_norm_type type,
        const llm_build_cb & cb,
        int il) {
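    // apply LLM_NORM (layernorm) or LLM_NORM_RMS, followed by an optional
    // scale (mw) and bias (mb); intermediate results are only named via the
    // callback when another op still follows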
    switch (type) {
        case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break;
        case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
    }

    if (mw || mb) {
        cb(cur, "norm", il);
    }

    if (mw) {
        cur = ggml_mul(ctx, cur, mw);
        if (mb) {
            cb(cur, "norm_w", il);
        }
    }

    if (mb) {
        cur = ggml_add(ctx, cur, mb);
    }

    return cur;
}
static struct ggml_tensor * llm_build_ffn(
        struct ggml_context * ctx,
        struct ggml_tensor * cur,
        struct ggml_tensor * up,
        struct ggml_tensor * up_b,
        struct ggml_tensor * gate,
        struct ggml_tensor * gate_b,
        struct ggml_tensor * down,
        struct ggml_tensor * down_b,
        struct ggml_tensor * act_scales,
        llm_ffn_op_type type_op,
        llm_ffn_gate_type type_gate,
        const llm_build_cb & cb,
        int il) {
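    // generic FFN: up -> (optional gate) -> activation -> down, with optional
    // biases at each step; with LLM_FFN_PAR the gate runs on the input in
    // parallel with up, is activated, and is then multiplied element-wise
    // with the up result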
    struct ggml_tensor * tmp = up ? ggml_mul_mat(ctx, up, cur) : cur;
    cb(tmp, "ffn_up", il);

    if (up_b) {
        tmp = ggml_add(ctx, tmp, up_b);
        cb(tmp, "ffn_up_b", il);
    }

    if (gate) {
        switch (type_gate) {
            case LLM_FFN_SEQ:
                {
                    cur = ggml_mul_mat(ctx, gate, tmp);
                    cb(cur, "ffn_gate", il);
                } break;
            case LLM_FFN_PAR:
                {
                    cur = ggml_mul_mat(ctx, gate, cur);
                    cb(cur, "ffn_gate", il);
                } break;
        }

        if (gate_b) {
            cur = ggml_add(ctx, cur, gate_b);
            cb(cur, "ffn_gate_b", il);
        }
    } else {
        cur = tmp;
    }

    switch (type_op) {
        case LLM_FFN_SILU:
            {
                cur = ggml_silu(ctx, cur);
                cb(cur, "ffn_silu", il);
            } break;
        case LLM_FFN_GELU:
            {
                cur = ggml_gelu(ctx, cur);
                cb(cur, "ffn_gelu", il);
                if (act_scales != NULL) {
                    cur = ggml_div(ctx, cur, act_scales);
                    cb(cur, "ffn_act", il);
                }
            } break;
        case LLM_FFN_RELU:
            {
                cur = ggml_relu(ctx, cur);
                cb(cur, "ffn_relu", il);
            } break;
        case LLM_FFN_RELU_SQR:
            {
                cur = ggml_relu(ctx, cur);
                cb(cur, "ffn_relu", il);

                cur = ggml_sqr(ctx, cur);
                cb(cur, "ffn_sqr(relu)", il);
            } break;
    }

    if (type_gate == LLM_FFN_PAR) {
        cur = ggml_mul(ctx, cur, tmp);
        cb(cur, "ffn_gate_par", il);
    }

    cur = ggml_mul_mat(ctx, down, cur);
    if (down_b) {
        cb(cur, "ffn_down", il);
    }

    if (down_b) {
        cur = ggml_add(ctx, cur, down_b);
    }

    return cur;
}
static struct ggml_tensor * llm_build_moe_ffn(
        struct ggml_context * ctx,
        struct ggml_tensor * cur,
        struct ggml_tensor * gate_inp,
        struct ggml_tensor * up_exps,
        struct ggml_tensor * gate_exps,
        struct ggml_tensor * down_exps,
        int64_t n_expert,
        int64_t n_expert_used,
        llm_ffn_op_type type_op,
        bool norm_w,
        const llm_build_cb & cb,
        int il) {
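    // MoE FFN: route each token through the n_expert_used experts with the
    // highest softmax(gate_inp) scores, optionally re-normalize the expert
    // weights, and sum the weighted expert outputs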
    int64_t n_embd = cur->ne[0];
    int64_t n_tokens = cur->ne[1];

    ggml_tensor * logits = ggml_mul_mat(ctx, gate_inp, cur); // [n_expert, n_tokens]
    cb(logits, "ffn_moe_logits", il);

    ggml_tensor * probs = ggml_soft_max(ctx, logits); // [n_expert, n_tokens]
    cb(probs, "ffn_moe_probs", il);

    // select experts
    ggml_tensor * selected_experts = ggml_top_k(ctx, probs, n_expert_used); // [n_expert_used, n_tokens]
    cb(selected_experts->src[0], "ffn_moe_argsort", il);
    cb(selected_experts, "ffn_moe_topk", il);

    ggml_tensor * weights = ggml_get_rows(ctx,
            ggml_reshape_3d(ctx, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
    cb(weights, "ffn_moe_weights", il);

    if (norm_w) {
        weights = ggml_reshape_2d(ctx, weights, n_expert_used, n_tokens);

        ggml_tensor * weights_sum = ggml_sum_rows(ctx, weights); // [1, n_tokens]
        cb(weights_sum, "ffn_moe_weights_sum", il);

        weights = ggml_div(ctx, weights, weights_sum); // [n_expert_used, n_tokens]
        cb(weights, "ffn_moe_weights_norm", il);

        weights = ggml_reshape_3d(ctx, weights, 1, n_expert_used, n_tokens);
    }

    cur = ggml_reshape_3d(ctx, cur, n_embd, 1, n_tokens);
    ggml_tensor * up = ggml_mul_mat_id(ctx, up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
    cb(up, "ffn_moe_up", il);

    ggml_tensor * gate = ggml_mul_mat_id(ctx, gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
    cb(gate, "ffn_moe_gate", il);

    switch (type_op) {
        case LLM_FFN_SILU:
            {
                gate = ggml_silu(ctx, gate);
                cb(gate, "ffn_moe_silu", il);
            } break;
        case LLM_FFN_GELU:
            {
                gate = ggml_gelu(ctx, gate);
                cb(gate, "ffn_moe_gelu", il);
            } break;
        default:
            GGML_ASSERT(false);
    }

    ggml_tensor * par = ggml_mul(ctx, up, gate); // [n_ff, n_expert_used, n_tokens]
    cb(par, "ffn_moe_gate_par", il);

    ggml_tensor * experts = ggml_mul_mat_id(ctx, down_exps, par, selected_experts); // [n_embd, n_expert_used, n_tokens]
    cb(experts, "ffn_moe_down", il);

    experts = ggml_mul(ctx, experts, weights);

    // aggregate experts
    ggml_tensor * moe_out = nullptr;
    for (int i = 0; i < n_expert_used; ++i) {
        ggml_tensor * cur_expert = ggml_view_2d(ctx, experts, n_embd, n_tokens,
                experts->nb[2], i*experts->nb[1]);

        if (i == 0) {
            moe_out = cur_expert;
        } else {
            moe_out = ggml_add(ctx, moe_out, cur_expert);
        }
    }

    if (n_expert_used == 1) {
        // avoid returning a non-contiguous tensor
        moe_out = ggml_cont(ctx, moe_out);
    }

    return moe_out;
}
static struct ggml_tensor * llm_build_kqv(
        struct ggml_context * ctx,
        const llama_model & model,
        const llama_hparams & hparams,
        const llama_cparams & cparams,
        const llama_kv_cache & kv,
        struct ggml_cgraph * graph,
        struct ggml_tensor * wo,
        struct ggml_tensor * wo_b,
        struct ggml_tensor * q_cur,
        struct ggml_tensor * kq_mask,
        int32_t n_tokens,
        int32_t n_kv,
        float kq_scale,
        const llm_build_cb & cb,
        int il) {
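    // attention core: either the fused ggml_flash_attn_ext path (V cache stored
    // non-transposed) or the classic KQ -> softmax -> KQV path (V cache stored
    // transposed)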
    const int64_t n_ctx = cparams.n_ctx;
    const int64_t n_head = hparams.n_head;
    const int64_t n_head_kv = hparams.n_head_kv;
    const int64_t n_embd_head_k = hparams.n_embd_head_k;
    const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
    const int64_t n_embd_head_v = hparams.n_embd_head_v;
    const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();

    struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
    cb(q, "q", il);

    struct ggml_tensor * k =
        ggml_view_3d(ctx, kv.k_l[il],
                n_embd_head_k, n_kv, n_head_kv,
                ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
                ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
                0);
    cb(k, "k", il);

    struct ggml_tensor * cur;

    if (cparams.flash_attn) {
        GGML_UNUSED(model);
        GGML_UNUSED(n_ctx);

        // split cached v into n_head heads (not transposed)
        struct ggml_tensor * v =
            ggml_view_3d(ctx, kv.v_l[il],
                    n_embd_head_v, n_kv, n_head_kv,
                    ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa),
                    ggml_row_size(kv.v_l[il]->type, n_embd_head_v),
                    0);
        cb(v, "v", il);

        cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias);

        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3) {
            ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);
        }

        cur = ggml_reshape_2d(ctx, cur, n_embd_head_v*n_head, n_tokens);
    } else {
        struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
        cb(kq, "kq", il);

        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3) {
            // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
            // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
            ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
        }
  5668. if (model.arch == LLM_ARCH_GROK) {
  5669. // need to do the following:
  5670. // multiply by attn_output_multiplyer of 0.08838834764831845
  5671. // and then :
  5672. // kq = 30 * tanh(kq / 30)
  5673. // before the softmax below
  5674. //try from phi2
  5675. //ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
  5676. kq = ggml_tanh(ctx, ggml_scale(ctx, kq, 0.08838834764831845f/30.0f));
  5677. kq = ggml_scale(ctx, kq, 30);
  5678. }
  5679. kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
  5680. cb(kq, "kq_soft_max_ext", il);
  5681. GGML_ASSERT(kv.size == n_ctx);
  5682. // split cached v into n_head heads
  5683. struct ggml_tensor * v =
  5684. ggml_view_3d(ctx, kv.v_l[il],
  5685. n_kv, n_embd_head_v, n_head_kv,
  5686. ggml_element_size(kv.v_l[il])*n_ctx,
  5687. ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v,
  5688. 0);
  5689. cb(v, "v", il);
  5690. struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
  5691. cb(kqv, "kqv", il);
  5692. struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
  5693. cb(kqv_merged, "kqv_merged", il);
  5694. cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_v*n_head, n_tokens);
  5695. cb(cur, "kqv_merged_cont", il);
  5696. }
  5697. ggml_build_forward_expand(graph, cur);
  5698. cur = ggml_mul_mat(ctx, wo, cur);
  5699. if (wo_b) {
  5700. cb(cur, "kqv_wo", il);
  5701. }
  5702. if (wo_b) {
  5703. cur = ggml_add(ctx, cur, wo_b);
  5704. }
  5705. return cur;
  5706. }
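
// Sketch of the non-flash-attention path above (ggml shape order, comment only):
//   q  : [n_embd_head_k, n_tokens, n_head]     permuted query
//   k  : [n_embd_head_k, n_kv, n_head_kv]      view into the K cache
//   kq  = K^T Q                             -> [n_kv, n_tokens, n_head]
//   kq  = soft_max(kq*kq_scale + mask)         (ALiBi folded in via f_max_alibi_bias)
//   kqv = V kq                              -> [n_embd_head_v, n_tokens, n_head]
// followed by a head merge and the output projection wo (+ optional bias wo_b).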
static struct ggml_tensor * llm_build_kv(
        struct ggml_context * ctx,
        const llama_model & model,
        const llama_hparams & hparams,
        const llama_cparams & cparams,
        const llama_kv_cache & kv,
        struct ggml_cgraph * graph,
        struct ggml_tensor * wo,
        struct ggml_tensor * wo_b,
        struct ggml_tensor * k_cur,
        struct ggml_tensor * v_cur,
        struct ggml_tensor * q_cur,
        struct ggml_tensor * kq_mask,
        int32_t n_tokens,
        int32_t kv_head,
        int32_t n_kv,
        float kq_scale,
        const llm_build_cb & cb,
        int il) {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(graph, q_cur);
    ggml_build_forward_expand(graph, k_cur);
    ggml_build_forward_expand(graph, v_cur);

    llm_build_kv_store(ctx, hparams, cparams, kv, graph, k_cur, v_cur, n_tokens, kv_head, cb, il);

    struct ggml_tensor * cur;

    cur = llm_build_kqv(ctx, model, hparams, cparams, kv, graph, wo, wo_b,
            q_cur, kq_mask, n_tokens, n_kv, kq_scale, cb, il);
    cb(cur, "kqv_out", il);

    return cur;
}
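
// Typical call site (sketch; build_llama() below contains the real thing):
//   cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
//           model.layers[il].wo, model.layers[il].bo,
//           Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv,
//           1.0f/sqrtf(float(n_embd_head)), cb, il);
// i.e. store this batch's K/V into the cache, then attend over the whole cache.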
struct llm_build_context {
    const llama_model    & model;
          llama_context  & lctx;
    const llama_hparams  & hparams;
    const llama_cparams  & cparams;
    const llama_batch    & batch;
    const llama_kv_cache & kv_self;

    const int64_t n_embd;
    const int64_t n_layer;
    const int64_t n_rot;
    const int64_t n_ctx;       // user-specified context size (can be different from n_ctx_train)
    const int64_t n_head;
    const int64_t n_head_kv;
    const int64_t n_embd_head_k;
    const int64_t n_embd_k_gqa;
    const int64_t n_embd_head_v;
    const int64_t n_embd_v_gqa;
    const int64_t n_expert;
    const int64_t n_expert_used;

    const float freq_base;
    const float freq_scale;
    const float ext_factor;
    const float attn_factor;
    const float beta_fast;
    const float beta_slow;
    const float norm_eps;
    const float norm_rms_eps;

    const int32_t n_tokens;
    const int32_t n_kv;        // size of KV cache to consider (n_kv <= kv_self.size)
    const int32_t n_outputs;
    const int32_t kv_head;     // index of where we store new KV data in the cache
    const int32_t n_orig_ctx;

    const bool flash_attn;

    const enum llama_pooling_type pooling_type;
    const enum llama_rope_type    rope_type;

    const llm_build_cb & cb;

    std::vector<uint8_t> & buf_compute_meta;

    struct ggml_context * ctx0 = nullptr;

    // TODO: consider making the entire interface noexcept
    llm_build_context(
            llama_context & lctx,
            const llama_batch & batch,
            const llm_build_cb & cb,
            bool worst_case) :
        model            (lctx.model),
        lctx             (lctx),
        hparams          (model.hparams),
        cparams          (lctx.cparams),
        batch            (batch),
        kv_self          (lctx.kv_self),
        n_embd           (hparams.n_embd),
        n_layer          (hparams.n_layer),
        n_rot            (hparams.n_rot),
        n_ctx            (cparams.n_ctx),
        n_head           (hparams.n_head),
        n_head_kv        (hparams.n_head_kv),
        n_embd_head_k    (hparams.n_embd_head_k),
        n_embd_k_gqa     (hparams.n_embd_k_gqa()),
        n_embd_head_v    (hparams.n_embd_head_v),
        n_embd_v_gqa     (hparams.n_embd_v_gqa()),
        n_expert         (hparams.n_expert),
        n_expert_used    (hparams.n_expert_used),
        freq_base        (cparams.rope_freq_base),
        freq_scale       (cparams.rope_freq_scale),
        ext_factor       (cparams.yarn_ext_factor),
        attn_factor      (cparams.yarn_attn_factor),
        beta_fast        (cparams.yarn_beta_fast),
        beta_slow        (cparams.yarn_beta_slow),
        norm_eps         (hparams.f_norm_eps),
        norm_rms_eps     (hparams.f_norm_rms_eps),
        n_tokens         (batch.n_tokens),
        n_kv             (worst_case ? kv_self.size : kv_self.n),
        n_outputs        (worst_case ? n_tokens : lctx.n_outputs),
        kv_head          (worst_case ? (kv_self.recurrent ? 0 : kv_self.size - n_tokens) : kv_self.head),
        n_orig_ctx       (cparams.n_yarn_orig_ctx),
        flash_attn       (cparams.flash_attn),
        pooling_type     (cparams.pooling_type),
        rope_type        (hparams.rope_type),
        cb               (cb),
        buf_compute_meta (lctx.buf_compute_meta) {
        // all initializations should be done in init()
    }
    void init() {
        struct ggml_init_params params = {
            /*.mem_size   =*/ buf_compute_meta.size(),
            /*.mem_buffer =*/ buf_compute_meta.data(),
            /*.no_alloc   =*/ true,
        };

        ctx0 = ggml_init(params);

        lctx.inp_tokens  = nullptr;
        lctx.inp_embd    = nullptr;
        lctx.inp_pos     = nullptr;
        lctx.inp_out_ids = nullptr;
        lctx.inp_KQ_mask = nullptr;
        lctx.inp_K_shift = nullptr;
        lctx.inp_mean    = nullptr;
        lctx.inp_cls     = nullptr;
        lctx.inp_s_copy  = nullptr;
        lctx.inp_s_mask  = nullptr;
        lctx.inp_s_seq   = nullptr;
    }

    void free() {
        if (ctx0) {
            ggml_free(ctx0);
            ctx0 = nullptr;
        }
    }
    struct ggml_cgraph * build_k_shift() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        GGML_ASSERT(kv_self.size == n_ctx);

        lctx.inp_K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx);
        cb(lctx.inp_K_shift, "K_shift", -1);
        ggml_set_input(lctx.inp_K_shift);

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * tmp =
                // we rotate only the first n_rot dimensions
                ggml_rope_custom_inplace(ctx0,
                        ggml_view_3d(ctx0, kv_self.k_l[il],
                            n_embd_head_k, n_head_kv, n_ctx,
                            ggml_row_size(kv_self.k_l[il]->type, n_embd_head_k),
                            ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
                            0),
                        lctx.inp_K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                        ext_factor, attn_factor, beta_fast, beta_slow);
            cb(tmp, "K_shifted", il);
            ggml_build_forward_expand(gf, tmp);
        }

        return gf;
    }
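
    // Background note: when cache cells are re-assigned to new positions
    // (context shifting), the cached keys still carry RoPE for their old
    // positions. This graph re-rotates each cell's keys in place by the
    // per-cell position delta held in K_shift; values need no fix-up since
    // they are not position-encoded.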
    struct ggml_cgraph * build_s_copy() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        GGML_ASSERT(kv_self.recurrent);

        struct ggml_tensor * state_copy = build_inp_s_copy();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * conv_states = ggml_reshape_2d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s(), kv_self.size);
            struct ggml_tensor * ssm_states  = ggml_reshape_2d(ctx0, kv_self.v_l[il], hparams.n_embd_v_s(), kv_self.size);

            conv_states = ggml_get_rows(ctx0, conv_states, state_copy);
            ssm_states  = ggml_get_rows(ctx0, ssm_states,  state_copy);

            // TODO: name the intermediate tensors with cb()

            ggml_build_forward_expand(gf, ggml_cpy(ctx0, conv_states, kv_self.k_l[il]));
            ggml_build_forward_expand(gf, ggml_cpy(ctx0, ssm_states,  kv_self.v_l[il]));
        }

        return gf;
    }
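
    // For recurrent models (e.g. Mamba) the k/v slots hold convolution and SSM
    // states rather than keys/values; this graph permutes them so that
    // state[i] <- state[s_copy[i]], letting states follow their sequences when
    // cells are moved.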
    struct ggml_cgraph * build_defrag(const std::vector<uint32_t> & ids) {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        for (uint32_t i = 0; i < ids.size(); ++i) {
            const uint32_t id = ids[i];

            if (i == id || id == ids.size()) {
                continue;
            }

            uint32_t nm = 1;

            while (i + nm < ids.size() && ids[i + nm] == id + nm) {
                nm++;
            }

            for (int il = 0; il < n_layer; ++il) {
                ggml_tensor * view_k_src = ggml_view_2d(ctx0, kv_self.k_l[il],
                        n_embd_k_gqa, nm,
                        ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
                        ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*i));

                ggml_tensor * view_k_dst = ggml_view_2d(ctx0, kv_self.k_l[il],
                        n_embd_k_gqa, nm,
                        ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
                        ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*id));

                ggml_tensor * view_v_src;
                ggml_tensor * view_v_dst;

                if (flash_attn) {
                    // NOTE: the V cache is not transposed when using flash attention
                    view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il],
                            n_embd_v_gqa, nm,
                            ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa),
                            ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*i));

                    view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il],
                            n_embd_v_gqa, nm,
                            ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa),
                            ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*id));
                } else {
                    view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il],
                            nm, n_embd_v_gqa,
                            ggml_row_size(kv_self.v_l[il]->type, kv_self.size),
                            ggml_row_size(kv_self.v_l[il]->type, i));

                    view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il],
                            nm, n_embd_v_gqa,
                            ggml_row_size(kv_self.v_l[il]->type, kv_self.size),
                            ggml_row_size(kv_self.v_l[il]->type, id));
                }

                ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_k_src, view_k_dst));
                ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_v_src, view_v_dst));
            }

            i += nm - 1;
        }

        //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes);

        return gf;
    }
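
    // Worked example (hypothetical ids): given cells {A, B, -, -, C, D} and
    // ids = {0, 1, n, n, 2, 3} where n == ids.size() marks cells that do not
    // move, the loop reaches i = 4 with id = 2, extends nm to 2 because
    // ids[5] == id + 1, and emits a single 2-row K/V copy from cells 4..5 to
    // cells 2..3 in every layer.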
    struct ggml_tensor * build_inp_pos() {
        lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
        cb(lctx.inp_pos, "inp_pos", -1);
        ggml_set_input(lctx.inp_pos);
        return lctx.inp_pos;
    }

    struct ggml_tensor * build_inp_out_ids() {
        lctx.inp_out_ids = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
        cb(lctx.inp_out_ids, "inp_out_ids", -1);
        ggml_set_input(lctx.inp_out_ids);
        return lctx.inp_out_ids;
    }

    struct ggml_tensor * build_inp_KQ_mask(bool causal = true) {
        if (causal) {
            lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv,     GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
        } else {
            lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
        }
        cb(lctx.inp_KQ_mask, "KQ_mask", -1);
        ggml_set_input(lctx.inp_KQ_mask);
        return flash_attn ? ggml_cast(ctx0, lctx.inp_KQ_mask, GGML_TYPE_F16) : lctx.inp_KQ_mask;
    }
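
    // Note: the mask's second dimension is padded to a multiple of
    // GGML_KQ_MASK_PAD (presumably for the benefit of the flash-attention
    // kernels), and the mask is cast to F16 when flash attention is enabled,
    // which is the type ggml_flash_attn_ext consumes.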
    struct ggml_tensor * build_inp_mean() {
        lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
        cb(lctx.inp_mean, "inp_mean", -1);
        ggml_set_input(lctx.inp_mean);
        return lctx.inp_mean;
    }

    struct ggml_tensor * build_inp_cls() {
        lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
        cb(lctx.inp_cls, "inp_cls", -1);
        ggml_set_input(lctx.inp_cls);
        return lctx.inp_cls;
    }

    struct ggml_tensor * build_inp_s_copy() {
        lctx.inp_s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, kv_self.size);
        cb(lctx.inp_s_copy, "inp_s_copy", -1);
        ggml_set_input(lctx.inp_s_copy);
        return lctx.inp_s_copy;
    }

    struct ggml_tensor * build_inp_s_mask() {
        lctx.inp_s_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv);
        cb(lctx.inp_s_mask, "inp_s_mask", -1);
        ggml_set_input(lctx.inp_s_mask);
        return lctx.inp_s_mask;
    }

    struct ggml_tensor * build_inp_s_seq() {
        lctx.inp_s_seq = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
        cb(lctx.inp_s_seq, "inp_s_seq", -1);
        ggml_set_input(lctx.inp_s_seq);
        return lctx.inp_s_seq;
    }
    struct ggml_cgraph * build_llama() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        // mutable variable, needed during the last layer of the computation to skip unused tokens
        int32_t n_tokens = this->n_tokens;

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                n_tokens = n_outputs;
                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            if (model.layers[il].ffn_gate_inp == nullptr) {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            } else {
                // MoE branch
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_moe_ffn(ctx0, cur,
                        model.layers[il].ffn_gate_inp,
                        model.layers[il].ffn_up_exps,
                        model.layers[il].ffn_gate_exps,
                        model.layers[il].ffn_down_exps,
                        n_expert, n_expert_used,
                        LLM_FFN_SILU, true,
                        cb, il);
                cb(cur, "ffn_moe_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);

            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
            if (layer_dir != nullptr) {
                cur = ggml_add(ctx0, cur, layer_dir);
            }
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
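
    // Per-layer dataflow of build_llama(), summarized (comment only):
    //   h   = x + Attn(rms_norm(x))   // first residual; RoPE applied to Q/K
    //   out = h + FFN(rms_norm(h))    // second residual; dense SwiGLU or MoE
    // plus an optional control-vector add per layer, then output_norm and the
    // lm_head matmul after the final layer.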
    struct ggml_cgraph * build_baichuan() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = model.type == MODEL_7B ? build_inp_pos() : nullptr;

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);

                switch (model.type) {
                    case MODEL_7B:
                        Qcur = ggml_rope_custom(
                            ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                            n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                            ext_factor, attn_factor, beta_fast, beta_slow
                        );
                        Kcur = ggml_rope_custom(
                            ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                            n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                            ext_factor, attn_factor, beta_fast, beta_slow
                        );
                        break;
                    case MODEL_13B:
                        Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
                        Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
                        break;
                    default:
                        GGML_ASSERT(false);
                }
                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_xverse() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_falcon() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * attn_norm;

            attn_norm = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(attn_norm, "attn_norm", il);

            // self-attention
            {
                if (model.layers[il].attn_norm_2) {
                    // Falcon-40B
                    cur = llm_build_norm(ctx0, inpL, hparams,
                            model.layers[il].attn_norm_2,
                            model.layers[il].attn_norm_2_b,
                            LLM_NORM, cb, il);
                    cb(cur, "attn_norm_2", il);
                } else {
                    cur = attn_norm;
                }

                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);

                // using mode = 2 for neox mode
                Qcur = ggml_rope_custom(
                    ctx0, Qcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, Kcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx,
                    freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur       = ggml_get_rows(ctx0,       cur, inp_out_ids);
                inpL      = ggml_get_rows(ctx0,      inpL, inp_out_ids);
                attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = cur;

            // feed forward
            {
                cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result
                        model.layers[il].ffn_up,   NULL,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            cur = ggml_add(ctx0, cur, inpL);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        // norm
        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
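
    // Structural note: unlike the sequential blocks above, Falcon computes
    // attention and the FFN in parallel from normed copies of the layer input
    // and adds both to the residual stream:
    //   out = x + Attn(norm_attn(x)) + FFN(norm_ffn(x))
    // (the two norms coincide for 7B; 40B has a separate attn_norm_2).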
    struct ggml_cgraph * build_grok() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        // mutable variable, needed during the last layer of the computation to skip unused tokens
        int32_t n_tokens = this->n_tokens;

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // multiply by embedding_multiplier_scale of 78.38367176906169
        inpL = ggml_scale(ctx0, inpL, 78.38367176906169f);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                n_tokens = n_outputs;
                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            // Grok
            // if attn_out_norm is present then apply it before adding the input
            if (model.layers[il].attn_out_norm) {
                cur = llm_build_norm(ctx0, cur, hparams,
                        model.layers[il].attn_out_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "attn_out_norm", il);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            // MoE branch
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "ffn_norm", il);

            cur = llm_build_moe_ffn(ctx0, cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    n_expert, n_expert_used,
                    LLM_FFN_GELU, true,
                    cb, il);
            cb(cur, "ffn_moe_out", il);

            // Grok
            // if layer_out_norm is present then apply it before adding the input
            // Idea: maybe ffn_out_norm is a better name
            if (model.layers[il].layer_out_norm) {
                cur = llm_build_norm(ctx0, cur, hparams,
                        model.layers[il].layer_out_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "layer_out_norm", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);

            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
            if (layer_dir != nullptr) {
                cur = ggml_add(ctx0, cur, layer_dir);
            }
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);

        // Grok
        // multiply logits by output_multiplier_scale of 0.5773502691896257
        cur = ggml_scale(ctx0, cur, 0.5773502691896257f);

        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
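
    // Observation (not stated in the source): the Grok magic numbers match
    // simple closed forms: 78.38367176906169 ~ sqrt(6144) = sqrt(n_embd),
    // 0.08838834764831845 ~ 1/sqrt(128) = 1/sqrt(n_embd_head), and
    // 0.5773502691896257 ~ 1/sqrt(3).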
    struct ggml_cgraph * build_dbrx() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        // mutable variable, needed during the last layer of the computation to skip unused tokens
        int32_t n_tokens = this->n_tokens;

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                struct ggml_tensor * Qcur = nullptr;
                struct ggml_tensor * Kcur = nullptr;
                struct ggml_tensor * Vcur = nullptr;

                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
                cb(cur, "wqkv_clamped", il);

                Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                n_tokens = n_outputs;
                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            // MoE branch
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].attn_out_norm, NULL,
                    LLM_NORM, cb, il);
            cb(cur, "attn_out_norm", il);

            cur = llm_build_moe_ffn(ctx0, cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, true,
                    cb, il);
            cb(cur, "ffn_moe_out", il);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);

            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
            if (layer_dir != nullptr) {
                cur = ggml_add(ctx0, cur, layer_dir);
            }
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_starcoder() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        struct ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
        cb(pos, "pos_embd", -1);

        inpL = ggml_add(ctx0, inpL, pos);
        cb(inpL, "inpL", -1);

        for (int il = 0; il < n_layer; ++il) {
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur  = ggml_get_rows(ctx0,  cur, inp_out_ids);
                inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
            }

            // add the input
            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
            cb(ffn_inp, "ffn_inp", il);

            // FF
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }

        cur = llm_build_norm(ctx0, inpL, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_refact() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);

                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                cb(Kcur, "Kcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
                cb(Qcur, "Qcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm, NULL,
                        LLM_NORM_RMS, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_bert() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;
        struct ggml_tensor * inp_pos = nullptr;

        if (model.arch != LLM_ARCH_JINA_BERT_V2) {
            inp_pos = build_inp_pos();
        }
        struct ggml_tensor * inp_mean = build_inp_mean();
        struct ggml_tensor * inp_cls  = build_inp_cls();

        // construct input embeddings (token, type, position)
        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // token types are hardcoded to zero ("Sentence A")
        struct ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
        inpL = ggml_add(ctx0, inpL, type_row0);
        if (model.arch == LLM_ARCH_BERT) {
            inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL);
        }
        cb(inpL, "inp_embd", -1);

        // embed layer norm
        inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, LLM_NORM, cb, -1);
        cb(inpL, "inp_norm", -1);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask(false);

        // iterate layers
        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * cur = inpL;

            struct ggml_tensor * Qcur;
            struct ggml_tensor * Kcur;
            struct ggml_tensor * Vcur;

            // self-attention
            if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) {
                Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), model.layers[il].bq);
                cb(Qcur, "Qcur", il);

                if (model.layers[il].attn_q_norm) {
                    Qcur = llm_build_norm(ctx0, Qcur, hparams,
                            model.layers[il].attn_q_norm,
                            model.layers[il].attn_q_norm_b,
                            LLM_NORM, cb, il);
                }

                Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), model.layers[il].bk);
                cb(Kcur, "Kcur", il);

                if (model.layers[il].attn_k_norm) {
                    Kcur = llm_build_norm(ctx0, Kcur, hparams,
                            model.layers[il].attn_k_norm,
                            model.layers[il].attn_k_norm_b,
                            LLM_NORM, cb, il);
                }

                Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, cur), model.layers[il].bv);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            } else {
                // compute Q and K and RoPE them
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);
            }

            struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
            struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));

            struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
            cb(kq, "kq", il);

            kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias);
            cb(kq, "kq_soft_max_ext", il);

            struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens)));
            cb(v, "v", il);

            struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_tokens, n_embd_head, n_head_kv), kq);
            cb(kqv, "kqv", il);

            struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
            cb(kqv_merged, "kqv_merged", il);

            cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
            cb(cur, "kqv_merged_cont", il);

            ggml_build_forward_expand(gf, cur);

            cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
            if (model.layers[il].bo) {
                cb(cur, "kqv_wo", il);
                cur = ggml_add(ctx0, cur, model.layers[il].bo);
            }
            cb(cur, "kqv_out", il);

            if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur  = ggml_get_rows(ctx0,  cur, inp_out_ids);
                inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
            }

            // re-add the layer input
            cur = ggml_add(ctx0, cur, inpL);

            // attention layer norm
            cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);

            struct ggml_tensor * ffn_inp = cur;
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            if (model.arch == LLM_ARCH_BERT) {
                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
            } else if (model.arch == LLM_ARCH_JINA_BERT_V2) {
                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
            } else {
                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            }
            cb(cur, "ffn_out", il);

            // attentions bypass the intermediate layer
            cur = ggml_add(ctx0, cur, ffn_inp);

            // output layer norm
            cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, cb, il);

            // input for next layer
            inpL = cur;
        }

        // final output
        cur = inpL;
        cb(cur, "result_embd", -1);

        // pooling layer
        switch (pooling_type) {
            case LLAMA_POOLING_TYPE_NONE:
                {
                    // nop
                } break;
            case LLAMA_POOLING_TYPE_MEAN:
                {
                    cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, cur)), inp_mean);
                    cb(cur, "result_embd_pooled", -1);
                } break;
            case LLAMA_POOLING_TYPE_CLS:
                {
                    cur = ggml_get_rows(ctx0, cur, inp_cls);
                    cb(cur, "result_embd_pooled", -1);
                } break;
            case LLAMA_POOLING_TYPE_UNSPECIFIED:
                {
                    GGML_ASSERT(false && "Invalid pooling type");
                } break;
        }

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
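
    // Pooling sketch: for LLAMA_POOLING_TYPE_MEAN, inp_mean is an
    // [n_tokens, n_tokens] matrix whose rows hold 1/seq_len over each
    // sequence's tokens, so the final matmul averages token embeddings per
    // sequence; CLS pooling instead gathers each sequence's first-token row
    // via inp_cls.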
    struct ggml_cgraph * build_bloom() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        inpL = llm_build_norm(ctx0, inpL, hparams,
                model.tok_norm,
                model.tok_norm_b,
                LLM_NORM, cb, -1);
        cb(inpL, "inp_norm", -1);

        for (int il = 0; il < n_layer; ++il) {
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);

                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);

                struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));

                cb(Qcur, "Qcur", il);
                cb(Kcur, "Kcur", il);
                cb(Vcur, "Vcur", il);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur  = ggml_get_rows(ctx0,  cur, inp_out_ids);
                inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
            }

            // Add the input
            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
            cb(ffn_inp, "ffn_inp", il);

            // FF
            {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);

                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                        NULL,                      NULL,
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                cb(cur, "ffn_out", il);
            }

            inpL = ggml_add(ctx0, cur, ffn_inp);
            cb(inpL, "l_out", il);
        }

        cur = llm_build_norm(ctx0, inpL, hparams,
                model.output_norm,
                model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
struct ggml_cgraph * build_mpt() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    struct ggml_tensor * cur;
    struct ggml_tensor * pos;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    if (model.pos_embd) {
        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();
        pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
        cb(pos, "pos_embd", -1);
        inpL = ggml_add(ctx0, inpL, pos);
        cb(inpL, "inpL", -1);
    }
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * attn_norm;
        attn_norm = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm,
                model.layers[il].attn_norm_b,
                LLM_NORM, cb, il);
        cb(attn_norm, "attn_norm", il);
        // self-attention
        {
            cur = attn_norm;
            cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
            cb(cur, "wqkv", il);
            if (model.layers[il].bqkv) {
                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);
            }
            if (hparams.f_clamp_kqv > 0.0f) {
                cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
                cb(cur, "wqkv_clamped", il);
            }
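            // editor's note: this corresponds to MPT's attn_clip_qkv option -- the
            // fused QKV activations are clamped to [-f_clamp_kqv, f_clamp_kqv] for
            // numerical stability; judging by the guard above, a non-positive value
            // disables the clamp (an inference from this code, not from the MPT
            // reference implementation).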
            struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
            struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
            struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);
            // Q/K Layernorm
            if (model.layers[il].attn_q_norm) {
                Qcur = llm_build_norm(ctx0, Qcur, hparams,
                        model.layers[il].attn_q_norm,
                        model.layers[il].attn_q_norm_b,
                        LLM_NORM, cb, il);
                cb(Qcur, "Qcur", il);
                Kcur = llm_build_norm(ctx0, Kcur, hparams,
                        model.layers[il].attn_k_norm,
                        model.layers[il].attn_k_norm_b,
                        LLM_NORM, cb, il);
                cb(Kcur, "Kcur", il);
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            } else {
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
        }
        // Add the input
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
        cb(ffn_inp, "ffn_inp", il);
        // feed forward
        {
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm,
                    model.layers[il].ffn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "ffn_norm", il);
            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up, model.layers[il].ffn_up_b,
                    NULL, NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                    model.layers[il].ffn_act,
                    LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
            cb(cur, "ffn_out", il);
        }
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm,
            model.output_norm_b,
            LLM_NORM, cb, -1);
    cb(cur, "result_norm", -1);
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_stablelm() {
    struct ggml_cgraph * gf = ggml_new_graph(ctx0);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm,
                model.layers[il].attn_norm_b,
                LLM_NORM, cb, il);
        cb(cur, "attn_norm", il);
        struct ggml_tensor * inpSA = cur;
        // self-attention
        {
            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            cb(Qcur, "Qcur", il);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].attn_q_norm) {
                Qcur = llm_build_norm(ctx0, Qcur, hparams,
                        model.layers[il].attn_q_norm,
                        NULL,
                        LLM_NORM, cb, il);
                cb(Qcur, "Qcur", il);
            }
            if (model.layers[il].attn_k_norm) {
                Kcur = llm_build_norm(ctx0, Kcur, hparams,
                        model.layers[il].attn_k_norm,
                        NULL,
                        LLM_NORM, cb, il);
                cb(Kcur, "Kcur", il);
            }
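            // editor's note on the ggml_rope_custom arguments below (my reading of
            // the ggml API, treat as a sketch): n_rot is the number of head
            // dimensions that get rotated, rope_type picks the normal vs. NeoX
            // layout, the 0 is the unused n_ctx slot, n_orig_ctx / freq_base /
            // freq_scale describe the training context and frequency scaling, and
            // ext_factor / attn_factor / beta_fast / beta_slow are the YaRN
            // context-extension parameters.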
            Qcur = ggml_rope_custom(
                ctx0, Qcur, inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, Kcur, inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, NULL,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
        cb(ffn_inp, "ffn_inp", il);
        // feed-forward network
        {
            if (model.layers[il].ffn_norm) {
                cur = llm_build_norm(ctx0, ffn_inp, hparams,
                        model.layers[il].ffn_norm,
                        model.layers[il].ffn_norm_b,
                        LLM_NORM, cb, il);
                cb(cur, "ffn_norm", il);
            } else {
                // parallel residual
                cur = inpSA;
            }
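            // editor's note: in the parallel-residual case there is no separate FFN
            // norm -- the feed-forward block reuses the attention-norm output
            // (inpSA), so attention and FFN effectively run in parallel off the
            // same normed input, GPT-NeoX style.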
            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up, NULL,
                    model.layers[il].ffn_gate, NULL,
                    model.layers[il].ffn_down, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);
        }
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm,
            model.output_norm_b,
            LLM_NORM, cb, -1);
    cb(cur, "result_norm", -1);
    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_qwen() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
            cb(cur, "wqkv", il);
            cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
            cb(cur, "bqkv", il);
            struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
            struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
            struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            // NeoX-style RoPE (selected via rope_type)
            Qcur = ggml_rope_custom(
                ctx0, Qcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx,
                freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, Kcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx,
                freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, NULL,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);
        // feed-forward network
        {
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "ffn_norm", il);
            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up, NULL,
                    model.layers[il].ffn_gate, NULL,
                    model.layers[il].ffn_down, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);
        }
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);
    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_qwen2() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;
        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
            cb(Qcur, "Qcur", il);
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
            cb(Kcur, "Kcur", il);
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
            cb(Vcur, "Vcur", il);
            Qcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);
        // feed-forward network
        cur = llm_build_norm(ctx0, ffn_inp, hparams,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "ffn_norm", il);
        cur = llm_build_ffn(ctx0, cur,
                model.layers[il].ffn_up, NULL,
                model.layers[il].ffn_gate, NULL,
                model.layers[il].ffn_down, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
        cb(cur, "ffn_out", il);
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);
    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_qwen2moe() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    // mutable variable, needed during the last layer of the computation to skip unused tokens
    int32_t n_tokens = this->n_tokens;
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;
        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
            cb(Qcur, "Qcur", il);
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
            cb(Kcur, "Kcur", il);
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
            cb(Vcur, "Vcur", il);
            Qcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            n_tokens = n_outputs;
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);
        // MoE branch
        cur = llm_build_norm(ctx0, ffn_inp, hparams,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "ffn_norm", il);
        ggml_tensor * moe_out =
                llm_build_moe_ffn(ctx0, cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, false,
                    cb, il);
        cb(moe_out, "ffn_moe_out", il);
        // FFN shared expert
        {
            ggml_tensor * cur_gate_inp = ggml_mul_mat(ctx0, model.layers[il].ffn_gate_inp_shexp, cur);
            cb(cur_gate_inp, "ffn_shexp_gate_inp", il);
            // sigmoid
            ggml_tensor * cur_gate = ggml_div(ctx0, ggml_silu(ctx0, cur_gate_inp), cur_gate_inp);
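            // editor's note: ggml has no dedicated sigmoid op here, so the gate is
            // recovered from silu via the identity silu(x) = x * sigmoid(x), i.e.
            // sigmoid(x) = silu(x) / x; note that x == 0 would divide 0 by 0, which
            // this code does not special-case.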
            cb(cur_gate, "ffn_shexp_gate", il);
            ggml_tensor * cur_ffn = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up_shexp, NULL,
                    model.layers[il].ffn_gate_shexp, NULL,
                    model.layers[il].ffn_down_shexp, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur_ffn, "ffn_shexp", il);
            ggml_tensor * ffn_shexp_out = ggml_mul(ctx0, cur_ffn, cur_gate);
            cb(ffn_shexp_out, "ffn_shexp_out", il);
            moe_out = ggml_add(ctx0, moe_out, ffn_shexp_out);
            cb(moe_out, "ffn_out", il);
            cur = moe_out;
        }
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);
    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_phi2() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    struct ggml_tensor * cur;
    struct ggml_tensor * attn_norm_output;
    struct ggml_tensor * ffn_output;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm,
                model.layers[il].attn_norm_b,
                LLM_NORM, cb, il);
        cb(attn_norm_output, "attn_norm", il);
        // self-attention
        {
            struct ggml_tensor * Qcur = nullptr;
            struct ggml_tensor * Kcur = nullptr;
            struct ggml_tensor * Vcur = nullptr;
            if (model.layers[il].wqkv) {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
                cb(cur, "wqkv", il);
                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                cb(cur, "bqkv", il);
                Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
                Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
                Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
            } else {
                Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq);
                Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk);
                Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv);
            }
            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Qcur = ggml_rope_custom(
                ctx0, Qcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx,
                freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            // with phi2, we scale the Q to avoid precision issues
            // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
            Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
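            // editor's note: because Q is pre-scaled by 1/sqrt(n_embd_head) here,
            // the llm_build_kv call at the end of this block passes a kq_scale of
            // 1.0f instead of the usual 1/sqrt(head_dim).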
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, Kcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx,
                freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
            attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids);
        }
        // FF
        {
            ffn_output = llm_build_ffn(ctx0, attn_norm_output,
                    model.layers[il].ffn_up, model.layers[il].ffn_up_b,
                    NULL, NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                    NULL,
                    LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
            cb(ffn_output, "ffn_out", il);
        }
        cur = ggml_add(ctx0, cur, ffn_output);
        cb(cur, "l_out", il);
        cur = ggml_add(ctx0, cur, inpL);
        cb(cur, "l_out", il);
        inpL = cur;
    }
    cur = llm_build_norm(ctx0, inpL, hparams,
            model.output_norm,
            model.output_norm_b,
            LLM_NORM, cb, -1);
    cb(cur, "result_norm", -1);
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output_no_bias", -1);
    cur = ggml_add(ctx0, cur, model.output_b);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_phi3() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        auto residual = inpL;
        // self-attention
        {
            struct ggml_tensor * attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm,
                    NULL,
                    LLM_NORM_RMS, cb, il);
            cb(attn_norm_output, "attn_norm", il);
            struct ggml_tensor * Qcur = nullptr;
            struct ggml_tensor * Kcur = nullptr;
            struct ggml_tensor * Vcur = nullptr;
            if (model.layers[il].wqkv) {
                cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
                cb(cur, "wqkv", il);
                Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd)));
                Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd)));
                Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa)));
            } else {
                Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq);
                Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk);
                Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv);
            }
            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Qcur = ggml_rope_custom(
                ctx0, Qcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx,
                freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head)));
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, Kcur, inp_pos, n_rot, rope_type, 0, n_orig_ctx,
                freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            residual = ggml_get_rows(ctx0, residual, inp_out_ids);
        }
        cur = ggml_add(ctx0, cur, residual);
        residual = cur;
        cur = llm_build_norm(ctx0, cur, hparams,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "ffn_norm", il);
        // FF
        // special-case: the up and gate tensors are merged into a single tensor
        // TODO: support this in llm_build_ffn
        {
            struct ggml_tensor * up = ggml_mul_mat(ctx0, model.layers[il].ffn_up, cur);
            cb(up, "ffn_up", il);
            auto g = ggml_cont(ctx0, ggml_view_2d(ctx0, up, up->ne[0] / 2, up->ne[1], ggml_row_size(up->type, up->ne[0]), 0));
            auto y = ggml_cont(ctx0, ggml_view_2d(ctx0, up, up->ne[0] / 2, up->ne[1], ggml_row_size(up->type, up->ne[0]), up->nb[1] / 2));
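            // editor's note (my reading of the two views above): ffn_up stores the
            // gate and up projections fused into one matrix, so g takes the first
            // half of each output row and y the second half (the byte offset
            // up->nb[1] / 2 is half a row); the product y * silu(g) below is then
            // the usual SwiGLU gate/up combination.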
            y = ggml_mul(ctx0, y, ggml_silu(ctx0, g));
            cb(y, "ffn_gate", il);
            auto down = ggml_mul_mat(ctx0, model.layers[il].ffn_down, y);
            cb(down, "ffn_down", il);
            cur = down;
            cb(cur, "ffn_out", il);
        }
        cur = ggml_add(ctx0, residual, cur);
        cb(cur, "l_out", il);
        inpL = cur;
    }
    cur = llm_build_norm(ctx0, inpL, hparams,
            model.output_norm,
            NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_plamo() {
    struct ggml_cgraph * gf = ggml_new_graph(ctx0);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "attn_norm", il);
        struct ggml_tensor * attention_norm = cur;
        // self-attention
        {
            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            Qcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens), inp_pos,
                n_embd_head, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow);
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens), inp_pos,
                n_embd_head, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow);
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, NULL,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        struct ggml_tensor * sa_out = cur;
        cur = attention_norm;
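        // editor's note: PLaMo feeds the feed-forward block from the same
        // attention-norm output rather than from a post-attention norm; the
        // attention result (sa_out) and the layer input (inpL) are both added
        // back in afterwards, giving a parallel-style block with two residual adds.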
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
        }
        // feed-forward network
        {
            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up, NULL,
                    model.layers[il].ffn_gate, NULL,
                    model.layers[il].ffn_down, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);
        }
        cur = ggml_add(ctx0, cur, sa_out);
        cb(cur, "l_out", il);
        cur = ggml_add(ctx0, cur, inpL);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);
    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_gpt2() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    struct ggml_tensor * cur;
    struct ggml_tensor * pos;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
    cb(pos, "pos_embd", -1);
    inpL = ggml_add(ctx0, inpL, pos);
    cb(inpL, "inpL", -1);
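    // editor's note: GPT-2 uses learned absolute position embeddings -- the
    // ggml_get_rows lookup above selects one row of model.pos_embd per token
    // position and adds it to the token embedding before the first layer.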
    for (int il = 0; il < n_layer; ++il) {
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm,
                model.layers[il].attn_norm_b,
                LLM_NORM, cb, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
            cb(cur, "wqkv", il);
            cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
            cb(cur, "bqkv", il);
            struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
            struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
            struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
        }
        // add the input
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
        cb(ffn_inp, "ffn_inp", il);
        // FF
        {
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm,
                    model.layers[il].ffn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "ffn_norm", il);
            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up, model.layers[il].ffn_up_b,
                    NULL, NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                    NULL,
                    LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
            cb(cur, "ffn_out", il);
        }
        inpL = ggml_add(ctx0, cur, ffn_inp);
        cb(inpL, "l_out", il);
    }
    cur = llm_build_norm(ctx0, inpL, hparams,
            model.output_norm,
            model.output_norm_b,
            LLM_NORM, cb, -1);
    cb(cur, "result_norm", -1);
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_codeshell() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm,
                model.layers[il].attn_norm_b,
                LLM_NORM, cb, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
            cb(cur, "wqkv", il);
            cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
            cb(cur, "bqkv", il);
            struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
            struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
            struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
            cb(tmpq, "tmpq", il);
            cb(tmpk, "tmpk", il);
            cb(Vcur, "Vcur", il);
            struct ggml_tensor * Qcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            struct ggml_tensor * Kcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
        }
        // add the input
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
        cb(ffn_inp, "ffn_inp", il);
        // FF
        {
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm,
                    model.layers[il].ffn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "ffn_norm", il);
            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up, model.layers[il].ffn_up_b,
                    NULL, NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                    NULL,
                    LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
            cb(cur, "ffn_out", il);
        }
        inpL = ggml_add(ctx0, cur, ffn_inp);
        cb(inpL, "l_out", il);
    }
    cur = llm_build_norm(ctx0, inpL, hparams,
            model.output_norm,
            model.output_norm_b,
            LLM_NORM, cb, -1);
    cb(cur, "result_norm", -1);
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_orion() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;
        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, model.layers[il].attn_norm_b,
                LLM_NORM, cb, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            // if (model.layers[il].bq) {
            //     Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
            //     cb(Qcur, "Qcur", il);
            // }
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            // if (model.layers[il].bk) {
            //     Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
            //     cb(Kcur, "Kcur", il);
            // }
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            // if (model.layers[il].bv) {
            //     Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
            //     cb(Vcur, "Vcur", il);
            // }
            Qcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, NULL,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);
        // feed-forward network
        cur = llm_build_norm(ctx0, ffn_inp, hparams,
                model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
                LLM_NORM, cb, il);
        cb(cur, "ffn_norm", il);
        cur = llm_build_ffn(ctx0, cur,
                model.layers[il].ffn_up, NULL,
                model.layers[il].ffn_gate, NULL,
                model.layers[il].ffn_down, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
        cb(cur, "ffn_out", il);
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, model.output_norm_b,
            LLM_NORM, cb, -1);
    cb(cur, "result_norm", -1);
    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_internlm2() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;
        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }
            Qcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);
        // feed-forward network
        cur = llm_build_norm(ctx0, ffn_inp, hparams,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "ffn_norm", il);
        cur = llm_build_ffn(ctx0, cur,
                model.layers[il].ffn_up, NULL,
                model.layers[il].ffn_gate, NULL,
                model.layers[il].ffn_down, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
        cb(cur, "ffn_out", il);
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);
    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
// ref: https://arxiv.org/abs/2203.03466
//      https://github.com/ggerganov/llama.cpp/issues/5276#issuecomment-1925774738
// based on the original build_llama() function
struct ggml_cgraph * build_minicpm() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);
    const int64_t n_embd = hparams.n_embd;
    // TODO: if the model varies, these parameters need to be read from the model
    const int64_t n_embd_base = 256;
    const float scale_embd = 12.0f;
    const float scale_depth = 1.4f;
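    // editor's note: these constants implement the muP-style rescaling from the
    // paper referenced above -- embeddings are multiplied by scale_embd, each
    // residual branch by scale_depth/sqrt(n_layer), and the final hidden state
    // by n_embd_base/n_embd before the tied lm_head; per the TODO they are
    // hard-coded for the released MiniCPM checkpoints.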
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    // scale the input embeddings
    inpL = ggml_scale(ctx0, inpL, scale_embd);
    cb(inpL, "inp_scaled", -1);
    // inp_pos - contains the positions
    struct ggml_tensor * inp_pos = build_inp_pos();
    // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
    struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;
        // norm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, cb, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            // compute Q and K and RoPE them
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }
            Qcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur", il);
            Kcur = ggml_rope_custom(
                ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Kcur, "Kcur", il);
            cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                    model.layers[il].wo, model.layers[il].bo,
                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
        }
        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        // scale_res - scale the hidden states for residual connection
        const float scale_res = scale_depth/sqrtf(float(n_layer));
        cur = ggml_scale(ctx0, cur, scale_res);
        cb(cur, "hidden_scaled", -1);
        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);
        // feed-forward network
        {
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "ffn_norm", il);
            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up, NULL,
                    model.layers[il].ffn_gate, NULL,
                    model.layers[il].ffn_down, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);
        }
        // scale the hidden states for residual connection
        cur = ggml_scale(ctx0, cur, scale_res);
        cb(cur, "hidden_scaled_ffn", -1);
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "l_out", il);
        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    cur = llm_build_norm(ctx0, cur, hparams,
            model.output_norm, NULL,
            LLM_NORM_RMS, cb, -1);
    cb(cur, "result_norm", -1);
    // lm_head scaling
    const float scale_lmhead = float(n_embd_base)/float(n_embd);
    cur = ggml_scale(ctx0, cur, scale_lmhead);
    cb(cur, "lmhead_scaling", -1);
    // lm_head
    cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
    cb(cur, "result_output", -1);
    ggml_build_forward_expand(gf, cur);
    return gf;
}
struct ggml_cgraph * build_gemma() {
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
    const int64_t n_embd_head_k = hparams.n_embd_head_k;
    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;
    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
    inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
    cb(inpL, "inp_scaled", -1);
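    // editor's note: Gemma scales the token embeddings by sqrt(n_embd) up front
    // (the int64 n_embd is implicitly converted to float here), which matches the
    // embedding normalization described for the reference Gemma implementation.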
  8230. // inp_pos - contains the positions
  8231. struct ggml_tensor * inp_pos = build_inp_pos();
  8232. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8233. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8234. for (int il = 0; il < n_layer; ++il) {
  8235. // norm
  8236. cur = llm_build_norm(ctx0, inpL, hparams,
  8237. model.layers[il].attn_norm, NULL,
  8238. LLM_NORM_RMS, cb, il);
  8239. cb(cur, "attn_norm", il);
  8240. // self-attention
  8241. {
  8242. // compute Q and K and RoPE them
  8243. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8244. cb(Qcur, "Qcur", il);
  8245. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  8246. cb(Kcur, "Kcur", il);
  8247. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  8248. cb(Vcur, "Vcur", il);
  8249. Qcur = ggml_rope_custom(
  8250. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos,
  8251. n_embd_head_k, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
  8252. ext_factor, attn_factor, beta_fast, beta_slow);
  8253. cb(Qcur, "Qcur", il);
  8254. Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));
  8255. cb(Qcur, "Qcur_scaled", il);
  8256. Kcur = ggml_rope_custom(
  8257. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos,
  8258. n_embd_head_k, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
  8259. ext_factor, attn_factor, beta_fast, beta_slow);
  8260. cb(Kcur, "Kcur", il);
  8261. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8262. model.layers[il].wo, NULL,
  8263. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
  8264. }
  8265. if (il == n_layer - 1) {
  8266. // skip computing output for unused tokens
  8267. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8268. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8269. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  8270. }
  8271. struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
  8272. cb(sa_out, "sa_out", il);
  8273. cur = llm_build_norm(ctx0, sa_out, hparams,
  8274. model.layers[il].ffn_norm, NULL,
  8275. LLM_NORM_RMS, cb, il);
  8276. cb(cur, "ffn_norm", il);
  8277. // feed-forward network
  8278. {
  8279. cur = llm_build_ffn(ctx0, cur,
  8280. model.layers[il].ffn_up, NULL,
  8281. model.layers[il].ffn_gate, NULL,
  8282. model.layers[il].ffn_down, NULL,
  8283. NULL,
  8284. LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
  8285. cb(cur, "ffn_out", il);
  8286. }
  8287. cur = ggml_add(ctx0, cur, sa_out);
  8288. cb(cur, "l_out", il);
  8289. // input for next layer
  8290. inpL = cur;
  8291. }
  8292. cur = inpL;
  8293. cur = llm_build_norm(ctx0, cur, hparams,
  8294. model.output_norm, NULL,
  8295. LLM_NORM_RMS, cb, -1);
  8296. cb(cur, "result_norm", -1);
  8297. // lm_head
  8298. cur = ggml_mul_mat(ctx0, model.output, cur);
  8299. cb(cur, "result_output", -1);
  8300. ggml_build_forward_expand(gf, cur);
  8301. return gf;
  8302. }
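
    // Note on the attention scaling in build_gemma(): the usual 1/sqrt(d_k) softmax
    // scale is folded into Qcur ("Qcur_scaled") and llm_build_kv is then called with
    // kq_scale = 1.0f, which is numerically equivalent to the standard
    // softmax(Q*K^T / sqrt(d_k)). Note also that the input embeddings are scaled by
    // sqrt(n_embd) ("inp_scaled") before the first layer.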
    struct ggml_cgraph * build_starcoder2() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, model.layers[il].attn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
                    LLM_NORM, cb, il);
            cb(cur, "ffn_norm", il);

            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
                    NULL,                      NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                    NULL,
                    LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
            cb(cur, "ffn_out", il);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, model.output_norm_b,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_mamba() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t d_model = n_embd;
        const int64_t d_conv  = hparams.ssm_d_conv;
        const int64_t d_inner = hparams.ssm_d_inner;
        GGML_ASSERT(2 * d_model == d_inner);
        const int64_t d_state = hparams.ssm_d_state;
        const int64_t dt_rank = hparams.ssm_dt_rank;

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        // {n_embd, n_tokens}
        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        struct ggml_tensor * state_mask = build_inp_s_mask();
        struct ggml_tensor * state_seq  = build_inp_s_seq();

        for (int il = 0; il < n_layer; ++il) {
            // (ab)using the KV cache to store the states
            struct ggml_tensor * conv_states = ggml_reshape_2d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s(), kv_self.size);
            struct ggml_tensor * ssm_states  = ggml_reshape_2d(ctx0, kv_self.v_l[il], hparams.n_embd_v_s(), kv_self.size);

            // clear states of sequences which are starting at the beginning of this batch
            {
                conv_states = ggml_mul(ctx0,
                        ggml_view_2d(ctx0, conv_states, conv_states->ne[0], n_kv, conv_states->nb[1], kv_head*conv_states->nb[1]),
                        state_mask);
                ssm_states = ggml_mul(ctx0,
                        ggml_view_2d(ctx0, ssm_states, ssm_states->ne[0], n_kv, ssm_states->nb[1], kv_head*ssm_states->nb[1]),
                        state_mask);
            }

            conv_states = ggml_reshape_3d(ctx0, conv_states, d_conv - 1, d_inner, n_kv);
            ssm_states  = ggml_reshape_3d(ctx0, ssm_states,  d_state, d_inner, n_kv);

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM_RMS, cb, il);
            cb(cur, "attn_norm", il);

            // {n_embd, 2*d_inner} * {n_embd, n_tokens} => {2*d_inner, n_tokens}
            struct ggml_tensor * xz = ggml_mul_mat(ctx0, model.layers[il].ssm_in, cur);
            // split the above in two
            // => {d_inner, n_tokens}
            struct ggml_tensor * x = ggml_view_2d(ctx0, xz, d_inner, xz->ne[1], xz->nb[1], 0);
            struct ggml_tensor * z = ggml_view_2d(ctx0, xz, d_inner, xz->ne[1], xz->nb[1], ggml_element_size(xz)*d_inner);

            // conv
            {
                // Custom operator which is needed only to ease simultaneous sequence processing.
                // For a single sequence, the equivalent is to concatenate the columns of conv_states and x,
                // then make a self-overlapping view of that over d_conv columns at each stride in the 3rd dimension,
                // then element-wise multiply that with the conv1d weight,
                // then sum the elements of each row,
                // (the last two steps are a dot product over rows (also doable with mul_mat))
                // then permute away the ne[0] dimension,
                // and then you're left with the resulting x tensor.
                // The new conv_states is the last (d_conv - 1) columns
                // of the last 3rd dimensional "layer" of the self-overlapping view.
                // For simultaneous sequences, it's more complicated.
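                //
                // For intuition, the single-sequence case in plain scalar form
                // (illustrative sketch only; array names s, w, x_in, x_out are assumed):
                //
                //   // s[d_conv-1][d_inner]: rolling window, w: depthwise conv1d weight
                //   for (int t = 0; t < n_tokens; ++t) {
                //       for (int c = 0; c < d_inner; ++c) {
                //           float acc = 0.0f;
                //           for (int k = 0; k < d_conv; ++k) {
                //               // window = last (d_conv - 1) states followed by the new input
                //               const float v = (k < d_conv - 1) ? s[k][c] : x_in[t][c];
                //               acc += v * w[k][c];
                //           }
                //           x_out[t][c] = acc;
                //       }
                //       // shift the window left by one column and append the new input
                //       for (int k = 0; k < d_conv - 2; ++k) {
                //           for (int c = 0; c < d_inner; ++c) { s[k][c] = s[k+1][c]; }
                //       }
                //       for (int c = 0; c < d_inner; ++c) { s[d_conv-2][c] = x_in[t][c]; }
                //   }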
                struct ggml_tensor * x_conv = ggml_ssm_conv(ctx0, conv_states, x, model.layers[il].ssm_conv1d, state_seq);

                // store last (d_conv - 1) columns of the conv_state part of x_conv back into the KV cache
                ggml_build_forward_expand(gf,
                        ggml_cpy(ctx0,
                                ggml_view_2d(ctx0, x_conv, d_conv - 1, d_inner*n_kv, d_conv*ggml_element_size(x_conv), (1+d_inner*n_tokens)*ggml_element_size(x_conv)),
                                ggml_view_1d(ctx0, kv_self.k_l[il], (d_conv - 1)*(d_inner)*(n_kv), kv_head*(d_conv - 1)*(d_inner)*ggml_element_size(x_conv))));

                // extract x from x_conv
                x = ggml_view_2d(ctx0, x_conv, d_inner, n_tokens, d_inner*ggml_element_size(x_conv), 0);

                // bias
                x = ggml_add(ctx0, x, model.layers[il].ssm_conv1d_b);

                x = ggml_silu(ctx0, x);
            }

            // ssm
            {
                // {d_inner, dt_rank + 2*d_state} * {d_inner, n_tokens} => {dt_rank + 2*d_state, n_tokens}
                struct ggml_tensor * x_db = ggml_mul_mat(ctx0, model.layers[il].ssm_x, x);
                // split
                struct ggml_tensor * dt = ggml_view_2d(ctx0, x_db, dt_rank, n_tokens, x_db->nb[1], 0);
                struct ggml_tensor * B  = ggml_view_2d(ctx0, x_db, d_state, n_tokens, x_db->nb[1], ggml_element_size(x_db)*dt_rank);
                struct ggml_tensor * C  = ggml_view_2d(ctx0, x_db, d_state, n_tokens, x_db->nb[1], ggml_element_size(x_db)*(dt_rank+d_state));

                // {dt_rank, d_inner} * {dt_rank, n_tokens} => {d_inner, n_tokens}
                dt = ggml_mul_mat(ctx0, model.layers[il].ssm_dt, dt);
                dt = ggml_add(ctx0, dt, model.layers[il].ssm_dt_b);

                // Custom operator to optimize the parallel associative scan,
                // as described in Annex D of the Mamba paper.
                // => {d_inner, n_tokens} and {d_state, d_inner, n_kv} combined,
                // because only a single tensor can be returned.
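                //
                // The scan evaluates the selective-SSM recurrence; per channel c and
                // state dimension s it amounts to (illustrative pseudocode, names assumed;
                // whether softplus(dt) is applied inside or outside the operator is an
                // implementation detail of ggml_ssm_scan):
                //
                //   dA      = expf(softplus(dt[c]) * A[s][c]);   // decay
                //   dB      = softplus(dt[c]) * B[s];            // input gate
                //   h[s][c] = h[s][c] * dA + dB * x[c];          // state update
                //   y[c]   += C[s] * h[s][c];                    // readout, summed over s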
                struct ggml_tensor * y_ssm_states = ggml_ssm_scan(ctx0, ssm_states, x, dt, model.layers[il].ssm_a, B, C, state_seq);

                // store last states (the second part of y_ssm_states)
                ggml_build_forward_expand(gf,
                        ggml_cpy(ctx0,
                                ggml_view_1d(ctx0, y_ssm_states, d_state*d_inner*n_kv, d_inner*n_tokens*ggml_element_size(y_ssm_states)),
                                ggml_view_1d(ctx0, kv_self.v_l[il], d_state*d_inner*n_kv, kv_head*d_state*d_inner*ggml_element_size(ssm_states))));

                struct ggml_tensor * y = ggml_view_2d(ctx0, y_ssm_states, d_inner, n_tokens, d_inner*ggml_element_size(y_ssm_states), 0);

                if (il == n_layer - 1) {
                    // skip computing output for unused tokens
                    struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                    x    = ggml_get_rows(ctx0,    x, inp_out_ids);
                    y    = ggml_get_rows(ctx0,    y, inp_out_ids);
                    z    = ggml_get_rows(ctx0,    z, inp_out_ids);
                    inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
                }

                // {d_inner, n_tokens} * {d_inner} => {d_inner, n_tokens}
                y = ggml_add(ctx0, y, ggml_mul(ctx0, x, model.layers[il].ssm_d));
                y = ggml_mul(ctx0, y, ggml_silu(ctx0, z));

                // {d_inner, n_embd} * {d_inner, n_tokens} => {n_embd, n_tokens}
                cur = ggml_mul_mat(ctx0, model.layers[il].ssm_out, y);
            }

            // residual
            cur = ggml_add(ctx0, cur, inpL);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        // final rmsnorm
        cur = llm_build_norm(ctx0, inpL, hparams,
                model.output_norm, NULL,
                LLM_NORM_RMS, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
    struct ggml_cgraph * build_command_r() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        const float f_logit_scale = hparams.f_logit_scale;

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);
            struct ggml_tensor * ffn_inp = cur;

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                    cb(Vcur, "Vcur", il);
                }

                if (model.layers[il].attn_q_norm) {
                    Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens,
                            ggml_element_size(Qcur) * n_embd_head,
                            ggml_element_size(Qcur) * n_embd_head * n_head,
                            0);
                    cb(Qcur, "Qcur", il);
                    Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens,
                            ggml_element_size(Kcur) * n_embd_head,
                            ggml_element_size(Kcur) * n_embd_head * n_head_kv,
                            0);
                    cb(Kcur, "Kcur", il);

                    Qcur = llm_build_norm(ctx0, Qcur, hparams,
                            model.layers[il].attn_q_norm,
                            NULL,
                            LLM_NORM, cb, il);
                    cb(Qcur, "Qcur", il);

                    Kcur = llm_build_norm(ctx0, Kcur, hparams,
                            model.layers[il].attn_k_norm,
                            NULL,
                            LLM_NORM, cb, il);
                    cb(Kcur, "Kcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                cur     = ggml_get_rows(ctx0,     cur, inp_out_ids);
                inpL    = ggml_get_rows(ctx0,    inpL, inp_out_ids);
                ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
            }

            struct ggml_tensor * attn_out = cur;

            // feed-forward network
            {
                cur = llm_build_ffn(ctx0, ffn_inp,
                        model.layers[il].ffn_up,   NULL,
                        model.layers[il].ffn_gate, NULL,
                        model.layers[il].ffn_down, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
                cb(cur, "ffn_out", il);
            }

            // add together residual + FFN + self-attention
            cur = ggml_add(ctx0, cur, inpL);
            cur = ggml_add(ctx0, cur, attn_out);
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                model.output_norm, NULL,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);

        if (f_logit_scale) {
            cur = ggml_scale(ctx0, cur, f_logit_scale);
        }

        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
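
    // Note on the layer structure in build_command_r(): this is a parallel block, so
    // the attention and the FFN both read the same normalized input (ffn_inp is the
    // attn_norm output) and the layer output is inpL + attn_out + ffn_out, rather than
    // the usual sequential pre-norm arrangement. Logits are optionally scaled by
    // f_logit_scale at the end.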
    // ref: https://allenai.org/olmo
    // based on the original build_llama() function, changes:
    //   * non-parametric layer norm
    //   * clamp qkv
    //   * removed bias
    //   * removed MoE
    struct ggml_cgraph * build_olmo() {
        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

        // mutable variable, needed during the last layer of the computation to skip unused tokens
        int32_t n_tokens = this->n_tokens;

        const int64_t n_embd_head = hparams.n_embd_head_v;
        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
        GGML_ASSERT(n_embd_head == hparams.n_rot);

        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;

        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

        // inp_pos - contains the positions
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

            // norm
            cur = llm_build_norm(ctx0, inpL, hparams,
                    NULL, NULL,
                    LLM_NORM, cb, il);
            cb(cur, "attn_norm", il);

            // self-attention
            {
                // compute Q and K and RoPE them
                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(Qcur, "Qcur", il);
                if (hparams.f_clamp_kqv > 0.0f) {
                    Qcur = ggml_clamp(ctx0, Qcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
                    cb(Qcur, "Qcur", il);
                }

                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                cb(Kcur, "Kcur", il);
                if (hparams.f_clamp_kqv > 0.0f) {
                    Kcur = ggml_clamp(ctx0, Kcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
                    cb(Kcur, "Kcur", il);
                }

                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                cb(Vcur, "Vcur", il);
                if (hparams.f_clamp_kqv > 0.0f) {
                    Vcur = ggml_clamp(ctx0, Vcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
                    cb(Vcur, "Vcur", il);
                }

                Qcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Qcur, "Qcur", il);

                Kcur = ggml_rope_custom(
                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                );
                cb(Kcur, "Kcur", il);

                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, nullptr,
                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

            if (il == n_layer - 1) {
                // skip computing output for unused tokens
                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                n_tokens = n_outputs;
                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // feed-forward network
            cur = llm_build_norm(ctx0, ffn_inp, hparams,
                    NULL, NULL,
                    LLM_NORM, cb, il);
            cb(cur, "ffn_norm", il);

            cur = llm_build_ffn(ctx0, cur,
                    model.layers[il].ffn_up,   NULL,
                    model.layers[il].ffn_gate, NULL,
                    model.layers[il].ffn_down, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
            cb(cur, "ffn_out", il);

            cur = ggml_add(ctx0, cur, ffn_inp);
            cb(cur, "ffn_out", il);

            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
            if (layer_dir != nullptr) {
                cur = ggml_add(ctx0, cur, layer_dir);
            }
            cb(cur, "l_out", il);

            // input for next layer
            inpL = cur;
        }

        cur = inpL;

        cur = llm_build_norm(ctx0, cur, hparams,
                NULL, NULL,
                LLM_NORM, cb, -1);
        cb(cur, "result_norm", -1);

        // lm_head
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);

        ggml_build_forward_expand(gf, cur);

        return gf;
    }
};
static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) {
    llama_batch dummy;
    dummy.n_tokens = 0;

    llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };

    struct llm_build_context llm(lctx, dummy, cb, false);

    llm.init();

    struct ggml_cgraph * result = llm.build_defrag(ids);

    llm.free();

    return result;
}

static struct ggml_cgraph * llama_build_graph_k_shift(llama_context & lctx) {
    llama_batch dummy;
    dummy.n_tokens = 0;

    llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };

    struct llm_build_context llm(lctx, dummy, cb, false);

    llm.init();

    struct ggml_cgraph * result = llm.build_k_shift();

    llm.free();

    return result;
}

static struct ggml_cgraph * llama_build_graph_s_copy(llama_context & lctx) {
    llama_batch dummy;
    dummy.n_tokens = 0;

    llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };

    struct llm_build_context llm(lctx, dummy, cb, false);

    llm.init();

    struct ggml_cgraph * result = llm.build_s_copy();

    llm.free();

    return result;
}
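
// The three builders above share one lifecycle: construct an llm_build_context with an
// empty dummy batch and a no-op callback, init(), build the special-purpose graph, then
// free(). Conceptually (illustrative sketch only, not part of the upstream API):
//
//   template <typename F>
//   static ggml_cgraph * build_with_dummy_batch(llama_context & lctx, F && build) {
//       llama_batch dummy;
//       dummy.n_tokens = 0;
//       llm_build_cb cb = [&](ggml_tensor *, const char *, int) { };
//       llm_build_context llm(lctx, dummy, cb, false);
//       llm.init();
//       ggml_cgraph * gf = build(llm);   // e.g. [](auto & l) { return l.build_k_shift(); }
//       llm.free();
//       return gf;
//   }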
static struct ggml_cgraph * llama_build_graph(
        llama_context & lctx,
        const llama_batch & batch,
        bool worst_case) {
    const auto & model = lctx.model;

    // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
    llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
        if (il >= 0) {
            ggml_format_name(cur, "%s-%d", name, il);
        } else {
            ggml_set_name(cur, name);
        }

        if (!lctx.cparams.offload_kqv) {
            if (strcmp(name, "kqv_merged_cont") == 0) {
                // all nodes between the KV store and the attention output are run on the CPU
                ggml_backend_sched_set_tensor_backend(lctx.sched, cur, lctx.backend_cpu);
            }
        }

        // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
        // FIXME: fix in ggml_backend_sched
        const bool full_offload = lctx.model.n_gpu_layers > (int)lctx.model.hparams.n_layer;
        if (batch.n_tokens < 32 || full_offload) {
            if (il != -1 && strcmp(name, "norm") == 0) {
                for (auto * backend : lctx.backends) {
                    if (ggml_backend_buft_supports_backend(lctx.model.buft_layer[il].buft, backend)) {
                        ggml_backend_sched_set_tensor_backend(lctx.sched, cur, backend);
                        break;
                    }
                }
            }
        }
    };
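
    // With this callback, per-layer tensors end up named "<name>-<il>" and graph-level
    // tensors keep their bare name: for example, cb(cur, "attn_norm", 0) produces
    // "attn_norm-0", while cb(cur, "result_output", -1) produces "result_output".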
    struct ggml_cgraph * result = NULL;

    struct llm_build_context llm(lctx, batch, cb, worst_case);

    llm.init();

    switch (model.arch) {
        case LLM_ARCH_LLAMA:
            {
                result = llm.build_llama();
            } break;
        case LLM_ARCH_BAICHUAN:
            {
                result = llm.build_baichuan();
            } break;
        case LLM_ARCH_FALCON:
            {
                result = llm.build_falcon();
            } break;
        case LLM_ARCH_GROK:
            {
                result = llm.build_grok();
            } break;
        case LLM_ARCH_STARCODER:
            {
                result = llm.build_starcoder();
            } break;
        case LLM_ARCH_REFACT:
            {
                result = llm.build_refact();
            } break;
        case LLM_ARCH_BERT:
        case LLM_ARCH_JINA_BERT_V2:
        case LLM_ARCH_NOMIC_BERT:
            {
                result = llm.build_bert();
            } break;
        case LLM_ARCH_BLOOM:
            {
                result = llm.build_bloom();
            } break;
        case LLM_ARCH_MPT:
            {
                result = llm.build_mpt();
            } break;
        case LLM_ARCH_STABLELM:
            {
                result = llm.build_stablelm();
            } break;
        case LLM_ARCH_QWEN:
            {
                result = llm.build_qwen();
            } break;
        case LLM_ARCH_QWEN2:
            {
                result = llm.build_qwen2();
            } break;
        case LLM_ARCH_QWEN2MOE:
            {
                result = llm.build_qwen2moe();
            } break;
        case LLM_ARCH_PHI2:
            {
                result = llm.build_phi2();
            } break;
        case LLM_ARCH_PHI3:
            {
                result = llm.build_phi3();
            } break;
        case LLM_ARCH_PLAMO:
            {
                result = llm.build_plamo();
            } break;
        case LLM_ARCH_GPT2:
            {
                result = llm.build_gpt2();
            } break;
        case LLM_ARCH_CODESHELL:
            {
                result = llm.build_codeshell();
            } break;
        case LLM_ARCH_ORION:
            {
                result = llm.build_orion();
            } break;
        case LLM_ARCH_INTERNLM2:
            {
                result = llm.build_internlm2();
            } break;
        case LLM_ARCH_MINICPM:
            {
                result = llm.build_minicpm();
            } break;
        case LLM_ARCH_GEMMA:
            {
                result = llm.build_gemma();
            } break;
        case LLM_ARCH_STARCODER2:
            {
                result = llm.build_starcoder2();
            } break;
        case LLM_ARCH_MAMBA:
            {
                result = llm.build_mamba();
            } break;
        case LLM_ARCH_XVERSE:
            {
                result = llm.build_xverse();
            } break;
        case LLM_ARCH_COMMAND_R:
            {
                result = llm.build_command_r();
            } break;
        case LLM_ARCH_DBRX:
            {
                result = llm.build_dbrx();
            } break;
        case LLM_ARCH_OLMO:
            {
                result = llm.build_olmo();
            } break;
        default:
            GGML_ASSERT(false);
    }

    llm.free();

    return result;
}
static void llama_set_k_shift(llama_context & lctx) {
    const int64_t kv_size = lctx.kv_self.size;

    assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer));

    int32_t * data = (int32_t *) lctx.inp_K_shift->data;

    for (int i = 0; i < kv_size; ++i) {
        data[i] = lctx.kv_self.cells[i].delta;
    }
}

static void llama_set_s_copy(llama_context & lctx) {
    const int64_t kv_size = lctx.kv_self.size;

    assert(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer));

    int32_t * data = (int32_t *) lctx.inp_s_copy->data;

    for (int i = 0; i < kv_size; ++i) {
        data[i] = lctx.kv_self.cells[i].src;
    }
}
static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
    //
    // set input data
    //

    const auto & hparams = lctx.model.hparams;
    const auto & cparams = lctx.cparams;
    const auto & kv_self = lctx.kv_self;

    if (batch.token) {
        const int64_t n_tokens = batch.n_tokens;

        ggml_backend_tensor_set(lctx.inp_tokens, batch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens));
    }

    if (batch.embd) {
        const int64_t n_embd   = hparams.n_embd;
        const int64_t n_tokens = batch.n_tokens;

        ggml_backend_tensor_set(lctx.inp_embd, batch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
    }

    if (batch.pos && lctx.inp_pos) {
        const int64_t n_tokens = batch.n_tokens;

        ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
    }

    if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
        GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs");
        const int64_t n_tokens = batch.n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer));
        int32_t * data = (int32_t *) lctx.inp_out_ids->data;

        if (lctx.n_outputs == n_tokens) {
            for (int i = 0; i < n_tokens; ++i) {
                data[i] = i;
            }
        } else if (batch.logits) {
            int32_t n_outputs = 0;
            for (int i = 0; i < n_tokens; ++i) {
                if (batch.logits[i]) {
                    data[n_outputs++] = i;
                }
            }
            // the graph needs to have been passed the correct number of outputs
            GGML_ASSERT(lctx.n_outputs == n_outputs);
        } else if (lctx.n_outputs == 1) {
            // only keep last output
            data[0] = n_tokens - 1;
        } else {
            GGML_ASSERT(lctx.n_outputs == 0);
        }
    }

    GGML_ASSERT(
        // (!a || b) is a logical implication (a -> b)
        // !hparams.causal_attn -> !cparams.causal_attn
        (hparams.causal_attn || !cparams.causal_attn) &&
        "causal attention with embedding models is not supported"
    );

    if (lctx.inp_KQ_mask) {
        // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
        if (cparams.causal_attn) {
            const int64_t n_kv     = kv_self.n;
            const int64_t n_tokens = batch.n_tokens;

            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));

            float * data = (float *) lctx.inp_KQ_mask->data;

            // For causal attention, use only the previous KV cells
            // of the correct sequence for each token of the batch.
            // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
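            // The mask layout is row-major over [n_tokens, n_kv]: entry data[j*n_kv + i]
            // is the additive bias applied to the attention score of batch token j
            // against KV cell i, with 0.0f for "attend", -INFINITY for "masked", and,
            // when ALiBi is in use, -|pos_i - pos_j| as a linear distance penalty
            // (per-head ALiBi slopes are applied downstream, in the softmax).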
            for (int h = 0; h < 1; ++h) {
                for (int j = 0; j < n_tokens; ++j) {
                    const llama_pos    pos    = batch.pos[j];
                    const llama_seq_id seq_id = batch.seq_id[j][0];

                    for (int i = 0; i < n_kv; ++i) {
                        float f;
                        if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
                            f = -INFINITY;
                        } else {
                            if (hparams.use_alibi) {
                                f = -fabs(lctx.kv_self.cells[i].pos - pos);
                            } else {
                                f = 0.0f;
                            }
                        }
                        data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
                    }
                }

                for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
                    for (int j = 0; j < n_kv; ++j) {
                        data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
                    }
                }
            }
        } else {
            // when using kv cache, the mask needs to match the kv cache size
            const int64_t n_tokens = batch.n_tokens;
            const int64_t n_stride = hparams.causal_attn ? kv_self.n : n_tokens;

            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));

            float * data = (float *) lctx.inp_KQ_mask->data;

            for (int h = 0; h < 1; ++h) {
                for (int j = 0; j < n_tokens; ++j) {
                    const llama_seq_id seq_id = batch.seq_id[j][0];

                    for (int i = 0; i < n_tokens; ++i) {
                        float f = -INFINITY;
                        for (int s = 0; s < batch.n_seq_id[i]; ++s) {
                            if (batch.seq_id[i][s] == seq_id) {
                                if (hparams.use_alibi) {
                                    f = -fabs(batch.pos[i] - batch.pos[j]);
                                } else {
                                    f = 0.0f;
                                }
                                break;
                            }
                        }

                        data[h*(n_tokens*n_tokens) + j*n_stride + i] = f;
                    }

                    for (int i = n_tokens; i < n_stride; ++i) {
                        data[h*(n_tokens*n_tokens) + j*n_stride + i] = -INFINITY;
                    }
                }
            }
        }
    }

    if (cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
        const int64_t n_tokens = batch.n_tokens;

        GGML_ASSERT(lctx.inp_mean);
        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));

        float * data = (float *) lctx.inp_mean->data;
        memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));

        std::vector<uint64_t> sum(n_tokens, 0);
        for (int i = 0; i < n_tokens; ++i) {
            const llama_seq_id seq_id = batch.seq_id[i][0];

            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");

            sum[seq_id] += 1;
        }

        std::vector<float> div(n_tokens, 0.0f);
        for (int i = 0; i < n_tokens; ++i) {
            const uint64_t s = sum[i];
            if (s > 0) {
                div[i] = 1.0f/float(s);
            }
        }

        for (int i = 0; i < n_tokens; ++i) {
            const llama_seq_id seq_id = batch.seq_id[i][0];
            data[seq_id*n_tokens + i] = div[seq_id];
        }
    }
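
    // Worked example of inp_mean (illustrative): a 4-token batch with tokens {0,1,2}
    // in seq 0 and token {3} in seq 1 yields row 0 = [1/3, 1/3, 1/3, 0] and
    // row 1 = [0, 0, 0, 1], so multiplying this matrix by the token embeddings
    // produces per-sequence mean embeddings.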
    if (cparams.pooling_type == LLAMA_POOLING_TYPE_CLS) {
        const int64_t n_tokens = batch.n_tokens;

        GGML_ASSERT(lctx.inp_cls);
        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));

        uint32_t * data = (uint32_t *) lctx.inp_cls->data;
        memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));

        for (int i = 0; i < n_tokens; ++i) {
            const llama_seq_id seq_id = batch.seq_id[i][0];
            const llama_pos    pos    = batch.pos[i];

            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS");

            if (pos == 0) {
                data[seq_id] = i;
            }
        }
    }

    if (kv_self.recurrent) {
        const int64_t n_kv = kv_self.n;

        if (lctx.inp_s_mask) {
            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer));
            float * data = (float *) lctx.inp_s_mask->data;

            // states which are not affected by the current batch are left untouched
            for (int i = 0; i < n_kv; ++i) {
                llama_seq_id seq_id     = i + lctx.kv_self.head;
                llama_kv_cell & kv_cell = lctx.kv_self.cells[seq_id];
                bool has_self_seq       = kv_cell.has_seq_id(seq_id);

                data[i] = (float) has_self_seq;

                // ensure current sequences will be kept
                if (!has_self_seq && kv_cell.pos >= 0) {
                    kv_cell.seq_id.insert(seq_id);
                }
            }
        }

        // For Mamba (and other recurrent architectures),
        // update the correct state(s)/sequence(s) for each token of the batch.
        // Like with the KQ_mask, if a token in the batch has multiple sequences,
        // they are assumed to be equivalent (not here, but in ggml_ssm_scan and ggml_ssm_conv).
        if (lctx.inp_s_seq) {
            const int64_t n_tokens = batch.n_tokens;

            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_seq->buffer));
            int32_t * data = (int32_t *) lctx.inp_s_seq->data;

            for (int j = 0; j < n_tokens; ++j) {
                const int32_t n_seq = batch.n_seq_id[j];
                GGML_ASSERT(0 < n_seq); // a token should be part of at least 1 sequence

                for (int i = 0; i < n_kv; ++i) {
                    if (i < n_seq) {
                        // for this type of model, the head is the minimum seq_id of the batch
                        data[j*n_kv + i] = batch.seq_id[j][i] - kv_self.head;
                    } else {
                        data[j*n_kv + i] = -1;
                    }
                }
            }
        }
    }
}
// Make sure enough space is available for outputs.
// Returns max number of outputs for which space was reserved.
static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
    const auto & cparams = lctx.cparams;
    const auto & hparams = lctx.model.hparams;

    const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max);

    const auto n_batch = cparams.n_batch;
    const auto n_vocab = hparams.n_vocab;
    const auto n_embd  = hparams.n_embd;

    // TODO: use a per-batch flag for logits presence instead
    const bool has_logits = cparams.causal_attn;
    const bool has_embd   = cparams.embeddings && (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);

    const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
    const size_t embd_size   = has_embd   ? n_embd*n_outputs_max  : 0;

    if (lctx.output_ids.empty()) {
        // init, never resized afterwards
        lctx.output_ids.resize(n_batch);
    }

    const size_t prev_size = lctx.buf_output ? ggml_backend_buffer_get_size(lctx.buf_output) : 0;
    const size_t new_size  = (logits_size + embd_size) * sizeof(float);

    // alloc only when more than the current capacity is required
    // TODO: also consider shrinking the buffer
    if (!lctx.buf_output || prev_size < new_size) {
        if (lctx.buf_output) {
#ifndef NDEBUG
            // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark)
            LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
#endif
            ggml_backend_buffer_free(lctx.buf_output);
            lctx.buf_output = nullptr;
            lctx.logits = nullptr;
            lctx.embd = nullptr;
        }

        lctx.buf_output = ggml_backend_buft_alloc_buffer(llama_default_buffer_type_cpu(true), new_size);
        if (lctx.buf_output == nullptr) {
            LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0));
            return 0;
        }
    }

    float * output_base = (float *) ggml_backend_buffer_get_base(lctx.buf_output);

    lctx.logits = has_logits ? output_base               : nullptr;
    lctx.embd   = has_embd   ? output_base + logits_size : nullptr;

    lctx.output_size = n_outputs_max;
    lctx.logits_size = logits_size;
    lctx.embd_size   = embd_size;

    // set all ids as invalid (negative)
    std::fill(lctx.output_ids.begin(), lctx.output_ids.end(), -1);

    ggml_backend_buffer_clear(lctx.buf_output, 0);

    lctx.n_outputs = 0;

    return n_outputs_max;
}
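
// Rough sizing intuition for the buffer above (illustrative numbers, not from this
// file): with n_vocab = 32000 and n_outputs_max = 512, the logits alone take
// 32000 * 512 * sizeof(float) = 62.5 MiB, which is why space is reserved only for
// the requested number of outputs rather than for every token in the batch.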
static void llama_graph_compute(
        llama_context & lctx,
        ggml_cgraph * gf,
        int n_threads) {
#ifdef GGML_USE_METAL
    if (ggml_backend_is_metal(lctx.backend_metal)) {
        ggml_backend_metal_set_n_cb(lctx.backend_metal, n_threads);
    }
#endif

    if (lctx.backend_cpu != nullptr) {
        ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads);
        ggml_backend_cpu_set_abort_callback(lctx.backend_cpu, lctx.abort_callback, lctx.abort_callback_data);
    }

    ggml_backend_sched_graph_compute_async(lctx.sched, gf);

    // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched));
}
// decode a batch of tokens by evaluating the transformer
//
//   - lctx:  llama context
//   - batch: batch to evaluate
//
// return 0 on success
// return positive int on warning
// return negative int on error
//
static int llama_decode_internal(
        llama_context & lctx,
        llama_batch batch_all) { // TODO: rename back to batch

    const uint32_t n_tokens_all = batch_all.n_tokens;

    if (n_tokens_all == 0) {
        LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
        return -1;
    }

    const auto & model   = lctx.model;
    const auto & hparams = model.hparams;
    const auto & cparams = lctx.cparams;

    GGML_ASSERT((!batch_all.token && batch_all.embd) || (batch_all.token && !batch_all.embd)); // NOLINT

    GGML_ASSERT(n_tokens_all <= cparams.n_batch);

    GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");

    if (lctx.t_compute_start_us == 0) {
        lctx.t_compute_start_us = ggml_time_us();
    }
    lctx.n_queued_tokens += n_tokens_all;

    auto & kv_self = lctx.kv_self;

    const int64_t n_embd  = hparams.n_embd;
    const int64_t n_vocab = hparams.n_vocab;

    uint32_t n_outputs = 0;
    uint32_t n_outputs_prev = 0;

    const auto n_ubatch = cparams.n_ubatch;

    std::vector<llama_pos> pos;
    std::vector<int32_t> n_seq_id;
    std::vector<llama_seq_id *> seq_id_arr;
    std::vector<std::vector<llama_seq_id>> seq_id;

    // count outputs
    if (batch_all.logits) {
        for (uint32_t i = 0; i < n_tokens_all; ++i) {
            n_outputs += batch_all.logits[i] != 0;
        }
    } else if (lctx.logits_all || (cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE)) {
        n_outputs = n_tokens_all;
    } else {
        // keep last output only
        n_outputs = 1;
    }

    // reserve output buffer
    if (llama_output_reserve(lctx, n_outputs) < n_outputs) {
        LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_outputs);
        return -2;
    }

    // set output mappings
    if (batch_all.logits) {
        int32_t i_logits = 0;
        for (uint32_t i = 0; i < n_tokens_all; ++i) {
            if (batch_all.logits[i]) {
                lctx.output_ids[i] = i_logits++;
            }
        }
    } else {
        for (uint32_t i = 0; i < n_outputs; ++i) {
            lctx.output_ids[i] = i;
        }
    }

    for (uint32_t cur_token = 0; cur_token < n_tokens_all; cur_token += n_ubatch) {
        const uint32_t n_tokens = std::min(n_ubatch, n_tokens_all - cur_token);
        llama_batch u_batch = {
            /* .n_tokens   = */ (int32_t) n_tokens,
            /* .token      = */ batch_all.token     ? batch_all.token    + cur_token        : nullptr,
            /* .embd       = */ batch_all.embd      ? batch_all.embd     + cur_token*n_embd : nullptr,
            /* .pos        = */ batch_all.pos       ? batch_all.pos      + cur_token        : nullptr,
            /* .n_seq_id   = */ batch_all.n_seq_id  ? batch_all.n_seq_id + cur_token        : nullptr,
            /* .seq_id     = */ batch_all.seq_id    ? batch_all.seq_id   + cur_token        : nullptr,
            /* .logits     = */ batch_all.logits    ? batch_all.logits   + cur_token        : nullptr,
            /* .all_pos_0  = */ batch_all.all_pos_0 + (llama_pos) cur_token*batch_all.all_pos_1,
            /* .all_pos_1  = */ batch_all.all_pos_1,
            /* .all_seq_id = */ batch_all.all_seq_id,
        };
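
        // The loop slices batch_all into micro-batches of at most n_ubatch tokens by
        // offsetting the parent arrays; for example (illustrative), n_tokens_all = 100
        // with n_ubatch = 32 yields slices of 32, 32, 32 and 4 tokens, all sharing the
        // parent batch's memory.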
        // count the outputs in this u_batch
        {
            int32_t n_outputs_new = 0;

            if (u_batch.logits) {
                for (uint32_t i = 0; i < n_tokens; i++) {
                    n_outputs_new += u_batch.logits[i] != 0;
                }
            } else if (n_outputs == n_tokens_all) {
                n_outputs_new = n_tokens;
            } else {
                // keep last output only
                if (cur_token + n_tokens >= n_tokens_all) {
                    n_outputs_new = 1;
                }
            }

            // needs to happen before the graph is built
            lctx.n_outputs = n_outputs_new;
        }

        int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
        GGML_ASSERT(n_threads > 0);

        // helpers for smoother batch API transition
        // after deprecating the llama_eval calls, these will be removed
        if (u_batch.pos == nullptr) {
            pos.resize(n_tokens);
            for (uint32_t i = 0; i < n_tokens; i++) {
                pos[i] = u_batch.all_pos_0 + i*u_batch.all_pos_1;
            }
            u_batch.pos = pos.data();
        }

        if (u_batch.seq_id == nullptr) {
            n_seq_id.resize(n_tokens);
            seq_id.resize(n_tokens);
            seq_id_arr.resize(n_tokens);
            for (uint32_t i = 0; i < n_tokens; i++) {
                n_seq_id[i] = 1;
                seq_id[i].resize(1);
                seq_id[i][0] = u_batch.all_seq_id;
                seq_id_arr[i] = seq_id[i].data();
            }
            u_batch.n_seq_id = n_seq_id.data();
            u_batch.seq_id = seq_id_arr.data();
        }

        // non-causal masks do not use the KV cache
        if (hparams.causal_attn) {
            llama_kv_cache_update(&lctx);

            // if we have enough unused cells before the current head ->
            //   better to start searching from the beginning of the cache, hoping to fill it
            if (kv_self.head > kv_self.used + 2*n_tokens) {
                kv_self.head = 0;
            }

            if (!llama_kv_cache_find_slot(kv_self, u_batch)) {
                return 1;
            }

            if (!kv_self.recurrent) {
                // a heuristic, to avoid attending the full cache if it is not yet utilized
                // after enough generations, the benefit from this heuristic disappears
                // if we start defragmenting the cache, the benefit from this will be more important
                const uint32_t pad = llama_kv_cache_get_padding(cparams);
                kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad)));
                //kv_self.n = llama_kv_cache_cell_max(kv_self);
            }
        }

        //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);

        ggml_backend_sched_reset(lctx.sched);
        ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);

        ggml_cgraph * gf = llama_build_graph(lctx, u_batch, false);

        // the output is always the last tensor in the graph
        struct ggml_tensor * res  = gf->nodes[gf->n_nodes - 1];
        struct ggml_tensor * embd = gf->nodes[gf->n_nodes - 2];

        if (lctx.n_outputs == 0) {
            // no output
            res  = nullptr;
            embd = nullptr;
        } else if (!hparams.causal_attn) {
            res = nullptr; // do not extract logits for embedding models such as BERT

            // token or sequence embeddings
            embd = gf->nodes[gf->n_nodes - 1];

            GGML_ASSERT(strcmp(embd->name, "result_embd") == 0 || strcmp(embd->name, "result_embd_pooled") == 0);
        } else if (cparams.embeddings) {
            // the embeddings could be in the second to last tensor, or any of the previous tensors
            int i_embd = gf->n_nodes - 2;
            for (int i = 3; strcmp(embd->name, "result_norm") != 0; ++i) {
                i_embd = gf->n_nodes - i;
                if (i_embd < 0) { break; }
                embd = gf->nodes[i_embd];
            }
            GGML_ASSERT(i_embd >= 0 && "missing result_norm tensor");

            // TODO: use a per-batch flag to know when to skip logits while keeping embeddings
            if (!cparams.causal_attn) {
                res = nullptr; // do not extract logits when not needed
                // skip computing logits
                // TODO: is this safe?
                gf->n_nodes = i_embd + 1;
            }
        } else {
            embd = nullptr; // do not extract embeddings when not needed
            GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
        }

        // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);

        // for big prompts, if BLAS is enabled, it is better to use only one thread
        // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
        // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
        //       we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
        //       with the BLAS calls. need a better solution
        // MoE Special Case: This logic applies when hparams.n_expert == 0, i.e. the model is NOT an MoE model. When an
        // MoE model is being processed, Accelerate/BLAS will not be involved, so capping the threads would limit performance.
        if (n_tokens >= 32 && hparams.n_expert == 0 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
            n_threads = std::min(4, n_threads);
        }

        ggml_backend_sched_alloc_graph(lctx.sched, gf);

        llama_set_inputs(lctx, u_batch);

        llama_graph_compute(lctx, gf, n_threads);

        // update the kv ring buffer
        {
            kv_self.head += n_tokens;

            // Ensure kv cache head points to a valid index.
            if (kv_self.head >= kv_self.size) {
                kv_self.head = 0;
            }
        }

#ifdef GGML_PERF
        // print timing information per ggml operation (for debugging purposes)
        // requires GGML_PERF to be defined
        ggml_graph_print(gf);
#endif

        // plot the computation graph in dot format (for debugging purposes)
        //if (n_past%100 == 0) {
        //    ggml_graph_dump_dot(gf, NULL, "llama.dot");
        //}

        // extract logits
        if (res) {
            ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(lctx.sched, res);
            GGML_ASSERT(backend_res != nullptr);
            GGML_ASSERT(lctx.logits != nullptr);

            float * logits_out = lctx.logits + n_outputs_prev*n_vocab;
            const int32_t n_outputs_new = lctx.n_outputs;

            if (n_outputs_new) {
                GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs);
                GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_vocab <= (int64_t) lctx.logits_size);
                ggml_backend_tensor_get_async(backend_res, res, logits_out, 0, n_outputs_new*n_vocab*sizeof(float));
            }
        }

        // extract embeddings
        if (embd) {
            ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched, embd);
            GGML_ASSERT(backend_embd != nullptr);

            switch (cparams.pooling_type) {
                case LLAMA_POOLING_TYPE_NONE:
                    {
                        // extract token embeddings
                        GGML_ASSERT(lctx.embd != nullptr);
                        float * embd_out = lctx.embd + n_outputs_prev*n_embd;
                        const int32_t n_outputs_new = lctx.n_outputs;

                        if (n_outputs_new) {
                            GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs);
                            GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_embd <= (int64_t) lctx.embd_size);
                            ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_outputs_new*n_embd*sizeof(float));
                        }
                    } break;
                case LLAMA_POOLING_TYPE_CLS:
                case LLAMA_POOLING_TYPE_MEAN:
                    {
                        GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0);

                        // extract sequence embeddings
                        auto & embd_seq_out = lctx.embd_seq;
                        embd_seq_out.clear();

                        for (uint32_t i = 0; i < n_tokens; i++) {
                            const llama_seq_id seq_id = u_batch.seq_id[i][0];
                            if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
                                continue;
                            }
                            embd_seq_out[seq_id].resize(n_embd);
                            ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
                        }
                    } break;
                case LLAMA_POOLING_TYPE_UNSPECIFIED:
                    {
                        GGML_ASSERT(false && "unknown pooling type");
                    } break;
            }
        }

        n_outputs_prev += lctx.n_outputs;
    }

    // set to total number of outputs in the batch, for use in llama_get_logits_ith
    lctx.n_outputs = n_outputs;

    // wait for the computation to finish (automatically done when obtaining the model output)
    //llama_synchronize(&lctx);

    // decide if we need to defrag the kv cache
    if (cparams.causal_attn && cparams.defrag_thold >= 0.0f) {
        const float fragmentation = kv_self.n >= 128 ? 1.0f - float(kv_self.used)/float(kv_self.n) : 0.0f;

        // queue defragmentation for next llama_kv_cache_update
        if (fragmentation > cparams.defrag_thold) {
            //LLAMA_LOG_INFO("fragmentation: %.2f\n", fragmentation);
            llama_kv_cache_defrag(kv_self);
        }
    }

    // Reset state for the next token before backend sync, to allow the CPU activities in the reset to
    // overlap with device computation.
    ggml_backend_sched_reset(lctx.sched);

    return 0;
}
// find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
    auto & kv_self = lctx.kv_self;

    const auto & hparams = lctx.model.hparams;

    const uint32_t n_layer = hparams.n_layer;

    const uint32_t n_kv   = llama_kv_cache_cell_max(kv_self);
    const uint32_t n_used = kv_self.used;

    assert(n_used <= n_kv);

    //const int64_t t_start = ggml_time_us();

    // number of cells moved
    uint32_t n_moves = 0;

    // each move requires 6*n_layer tensors (see build_defrag)
    //   - source view, destination view, copy operation
    //   - x2 for keys and values
    //const uint32_t max_moves = LLAMA_MAX_NODES/(6*n_layer);
    // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
    const uint32_t max_moves = (LLAMA_MAX_NODES - 2*n_layer)/(6*n_layer);

    // determine which KV cells to move where
    //
    //   cell i moves to ids[i]
    //
    //   if ids[i] == i || ids[i] == n_kv, then cell i is not moved
    //
    std::vector<uint32_t> ids(n_kv, n_kv);
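
    // Worked example (illustrative): with n_kv = 6 and cells [A, A, _, _, B, B]
    // (a 2-cell hole at indices 2..3), the loop below finds the hole, takes the two
    // non-empty cells from the end, and produces ids = {0, 1, 6, 6, 2, 3}, i.e.
    // cells 4 and 5 move to 2 and 3 and the cache becomes [A, A, B, B, _, _].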
  9505. for (uint32_t i0 = 0; i0 < n_used; ++i0) {
  9506. const auto & cell0 = kv_self.cells[i0];
  9507. if (!cell0.is_empty()) {
  9508. ids[i0] = i0;
  9509. continue;
  9510. }
  9511. // found a hole - fill it with data from the end of the cache
  9512. uint32_t nh = 1;
  9513. // determine the size of the hole
  9514. while (i0 + nh < n_used && kv_self.cells[i0 + nh].is_empty()) {
  9515. nh++;
  9516. }
  9517. uint32_t nf = 0;
  9518. uint32_t is = n_kv - 1;
  9519. // starting from the end, find nh non-empty cells
  9520. for (; is > i0; --is) {
  9521. const auto & cell1 = kv_self.cells[is];
  9522. if (cell1.is_empty() || ids[is] != n_kv) {
  9523. continue;
  9524. }
            // non-empty cell which is not yet moved
            nf++;
            if (nf == nh) {
                break;
            }
        }

        // this can only happen if `n_used` is not accurate, which would be a bug
        GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh");

        nf = 0;

        uint32_t i1 = is;

        // are we moving a continuous block of memory?
        bool cont = false;

        // should we stop searching for the next move?
        bool stop = false;

        // go back and move the nf cells to the hole
        for (; i1 < n_kv; ++i1) {
            auto & cell1 = kv_self.cells[i1];

            if (cell1.is_empty() || ids[i1] != n_kv) {
                if (n_moves == max_moves) {
                    stop = true;
                    break;
                }

                cont = false;
                continue;
            }

            // this cell goes to (i0 + nf)
            ids[i1] = i0 + nf;

            // move the cell meta data
            kv_self.cells[i0 + nf] = cell1;

            // clear the old cell and move the head there
            cell1 = llama_kv_cell();
            kv_self.head = n_used;

            if (!cont) {
                n_moves++;
                cont = true;
            }

            nf++;
            if (nf == nh) {
                break;
            }
        }

        if (stop || n_moves == max_moves) {
            break;
        }

        //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh);

        i0 += nh - 1;
    }

    if (n_moves == 0) {
        return;
    }

    //LLAMA_LOG_INFO("(tmp log) KV defrag cell moves: %u\n", n_moves);

    //LLAMA_LOG_INFO("expected gf nodes: %u\n", 6*n_moves*n_layer);

#if 0
    // CPU defrag
    //
    // TODO: optimizations are possible:
    //       - multiple threads
    //       - avoid copying to the host memory when already there
    //
    // likely not worth the effort, as we have ggml_graph based defrag
    //

    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();

    const uint32_t kv_size = kv_self.size;

    std::vector<uint8_t> buf_k;
    std::vector<uint8_t> buf_v;

    for (uint32_t il = 0; il < n_layer; ++il) {
        const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
        const size_t k_size     = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*kv_size);

        const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
        const size_t v_size    = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*kv_size);

        buf_k.resize(k_size);
        buf_v.resize(v_size);

        ggml_backend_tensor_get(kv_self.k_l[il], buf_k.data(), 0, buf_k.size());
        ggml_backend_tensor_get(kv_self.v_l[il], buf_v.data(), 0, buf_v.size());

        // batch move [i, i+nm) to [id, id+nm)
        // note: cells can move only to a lower index
        for (uint32_t i = 0; i < n_kv; ++i) {
            const uint32_t id = ids[i];

            if (i == id || id == n_kv) {
                continue;
            }

            uint32_t nm = 1;

            while (i + nm < n_kv && ids[i + nm] == id + nm) {
                nm++;
            }

            // move keys
            {
                const int64_t os =  i*k_size_row;
                const int64_t od = id*k_size_row;

                memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row);
            }

            // move values (note: they are transposed)
            {
                const int64_t os =  i;
                const int64_t od = id;

                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el);
                }
            }

            i += nm - 1;
        }

        ggml_backend_tensor_set(kv_self.k_l[il], buf_k.data(), 0, buf_k.size());
        ggml_backend_tensor_set(kv_self.v_l[il], buf_v.data(), 0, buf_v.size());
    }
#else
    // ggml_graph defrag

    ggml_backend_sched_reset(lctx.sched);

    ggml_cgraph * gf = llama_build_graph_defrag(lctx, ids);

    llama_graph_compute(lctx, gf, lctx.cparams.n_threads);
#endif

    //const int64_t t_end = ggml_time_us();

    //LLAMA_LOG_INFO("(tmp log) KV defrag time: %.3f ms\n", (t_end - t_start)/1000.0);
}
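// Illustration of the move plan above (editorial, not from the source): with n_kv = 8 and cells
// [A, _, _, B, C, _, _, _] ('_' = empty, n_used = 5), the scan finds the hole at i0 = 1 (nh = 2),
// takes the last two non-empty cells B and C, and produces ids = { 0, 8, 8, 1, 2, 8, 8, 8 },
// i.e. cell 3 -> 1 and cell 4 -> 2 as a single continuous move (n_moves = 1); entries equal to
// n_kv (8) mean "not moved".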
static void llama_kv_cache_update_internal(struct llama_context & lctx) {
    bool need_reserve = false;

    // apply K-shift if needed
    if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE && lctx.kv_self.has_shift) {
        {
            ggml_backend_sched_reset(lctx.sched);

            ggml_cgraph * gf = llama_build_graph_k_shift(lctx);

            ggml_backend_sched_alloc_graph(lctx.sched, gf);

            llama_set_k_shift(lctx);

            llama_graph_compute(lctx, gf, lctx.cparams.n_threads);

            need_reserve = true;
        }

        {
            auto & kv_self = lctx.kv_self;

            kv_self.has_shift = false;

            for (uint32_t i = 0; i < kv_self.size; ++i) {
                kv_self.cells[i].delta = 0;
            }
        }
    }

    if (lctx.kv_self.recurrent && lctx.kv_self.do_copy) {
        {
            ggml_backend_sched_reset(lctx.sched);

            ggml_cgraph * gf = llama_build_graph_s_copy(lctx);

            ggml_backend_sched_alloc_graph(lctx.sched, gf);

            llama_set_s_copy(lctx);

            llama_graph_compute(lctx, gf, lctx.cparams.n_threads);

            need_reserve = true;
        }

        {
            auto & kv_self = lctx.kv_self;

            kv_self.do_copy = false;

            for (uint32_t i = 0; i < kv_self.size; ++i) {
                kv_self.cells[i].src = i;
            }
        }
    }

    // defragment the KV cache if needed
    if (lctx.kv_self.do_defrag) {
        llama_kv_cache_defrag_internal(lctx);

        need_reserve = true;

        lctx.kv_self.do_defrag = false;
    }

    // reserve a worst case graph again
    if (need_reserve) {
        // TODO: extract to a function
        // build worst-case graph
        int n_tokens = (int)std::min(lctx.cparams.n_ctx, lctx.cparams.n_ubatch);
        int n_past = lctx.cparams.n_ctx - n_tokens;
        llama_token token = llama_token_bos(&lctx.model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
        ggml_cgraph * gf = llama_build_graph(lctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);

        // initialize scheduler with the worst-case graph
        ggml_backend_sched_reset(lctx.sched);
        if (!ggml_backend_sched_reserve(lctx.sched, gf)) {
            LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
        }
    }
}
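// Note (editorial): the re-reserve above matters because the K-shift / s-copy / defrag graphs
// just resized the scheduler's buffers for themselves; reserving a worst-case decode graph
// (n_tokens = min(n_ctx, n_ubatch)) restores allocations large enough for any later llama_decode.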
//
// tokenizer
//

static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
    return vocab.type;
}

static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL;
}

static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNKNOWN;
}

static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
}

static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
}

static bool llama_is_user_defined_token(const llama_vocab & vocab, llama_token id) {
    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
}

static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
    GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
    GGML_ASSERT(llama_is_byte_token(vocab, id));
    const auto & token_data = vocab.id_to_token.at(id);
    switch (llama_vocab_get_type(vocab)) {
        case LLAMA_VOCAB_TYPE_SPM: {
            // SPM byte tokens have the form "<0xAB>" - parse the two hex digits
            auto buf = token_data.text.substr(3, 2);
            return strtol(buf.c_str(), NULL, 16);
        }
        case LLAMA_VOCAB_TYPE_BPE: {
            GGML_ASSERT(false);
            return unicode_utf8_to_byte(token_data.text); // TODO: why is this here after GGML_ASSERT?
        }
        case LLAMA_VOCAB_TYPE_WPM: {
            GGML_ASSERT(false);
        }
        default:
            GGML_ASSERT(false);
    }
}

static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
    GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
    static const char * hex = "0123456789ABCDEF";
    switch (llama_vocab_get_type(vocab)) {
        case LLAMA_VOCAB_TYPE_SPM: {
            const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
            auto token = vocab.token_to_id.find(buf);
            if (token != vocab.token_to_id.end()) {
                return (*token).second;
            }
            // Try to fall back to just the byte as a string
            const char buf2[2] = { (char)ch, 0 };
            return vocab.token_to_id.at(buf2);
        }
        case LLAMA_VOCAB_TYPE_WPM:
        case LLAMA_VOCAB_TYPE_BPE: {
            return vocab.token_to_id.at(unicode_byte_to_utf8(ch));
        }
        default:
            GGML_ASSERT(false);
    }
}

static void llama_escape_whitespace(std::string & text) {
    replace_all(text, " ", "\xe2\x96\x81");
}

static void llama_unescape_whitespace(std::string & word) {
    replace_all(word, "\xe2\x96\x81", " ");
}
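// Example (editorial): "\xe2\x96\x81" is the UTF-8 encoding of U+2581 ("▁"), the marker
// SentencePiece uses in place of spaces, so:
//   llama_escape_whitespace  : "Hello world" -> "Hello▁world"
//   llama_unescape_whitespace: "Hello▁world" -> "Hello world"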
struct llm_symbol {
    using index = int;
    index prev;
    index next;
    const char * text;
    size_t n;
};

static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");

// SPM tokenizer
// original implementation:
// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4

struct llm_bigram_spm {
    struct comparator {
        bool operator()(const llm_bigram_spm & l, const llm_bigram_spm & r) const {
            return (l.score < r.score) || (l.score == r.score && l.left > r.left);
        }
    };
    using queue_storage = std::vector<llm_bigram_spm>;
    using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
    llm_symbol::index left;
    llm_symbol::index right;
    float score;
    size_t size;
};

struct llm_tokenizer_spm {
    llm_tokenizer_spm(const llama_vocab & vocab) : vocab(vocab) {}

    void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
        // split string into utf8 chars
        int index = 0;
        size_t offs = 0;
        while (offs < text.size()) {
            llm_symbol sym;
            size_t len = utf8_len(text[offs]);
            sym.text = text.c_str() + offs;
            sym.n = std::min(len, text.size() - offs);
            offs += sym.n;
            sym.prev = index - 1;
            sym.next = offs == text.size() ? -1 : index + 1;
            index++;
            symbols.emplace_back(sym);
        }

        // seed the work queue with all possible 2-character tokens.
        for (size_t i = 1; i < symbols.size(); ++i) {
            try_add_bigram(i - 1, i);
        }

        // keep substituting the highest frequency pairs for as long as we can.
        while (!work_queue.empty()) {
            auto bigram = work_queue.top();
            work_queue.pop();

            auto & left_sym = symbols[bigram.left];
            auto & right_sym = symbols[bigram.right];

            // if one of the symbols already got merged, skip it.
            if (left_sym.n == 0 || right_sym.n == 0 ||
                left_sym.n + right_sym.n != bigram.size) {
                continue;
            }

            // merge the right sym into the left one
            left_sym.n += right_sym.n;
            right_sym.n = 0;

            //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);

            // remove the right sym from the chain
            left_sym.next = right_sym.next;
            if (right_sym.next >= 0) {
                symbols[right_sym.next].prev = bigram.left;
            }

            // find more substitutions
            try_add_bigram(left_sym.prev, bigram.left);
            try_add_bigram(bigram.left, left_sym.next);
        }

        for (int i = 0; i != -1; i = symbols[i].next) {
            auto & symbol = symbols[i];
            resegment(symbol, output);
        }
    }

private:
    void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
        auto text = std::string(symbol.text, symbol.n);
        auto token = vocab.token_to_id.find(text);

        // Do we need to support is_unused?
        if (token != vocab.token_to_id.end()) {
            output.push_back((*token).second);
            return;
        }

        const auto p = rev_merge.find(text);

        if (p == rev_merge.end()) {
            // output any symbols that did not form tokens as bytes.
            output.reserve(output.size() + symbol.n);
            for (int j = 0; j < (int)symbol.n; ++j) {
                llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
                output.push_back(token_id);
            }
            return;
        }

        resegment(symbols[p->second.first],  output);
        resegment(symbols[p->second.second], output);
    }

    void try_add_bigram(int left, int right) {
        if (left == -1 || right == -1) {
            return;
        }

        const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
        auto token = vocab.token_to_id.find(text);

        if (token == vocab.token_to_id.end()) {
            return;
        }

        if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
            return;
        }

        const auto & tok_data = vocab.id_to_token[(*token).second];

        llm_bigram_spm bigram;
        bigram.left  = left;
        bigram.right = right;
        bigram.score = tok_data.score;
        bigram.size  = text.size();

        work_queue.push(bigram);

        // Do we need to support is_unused?
        rev_merge[text] = std::make_pair(left, right);
    }

    const llama_vocab & vocab;

    std::vector<llm_symbol> symbols;
    llm_bigram_spm::queue work_queue;

    std::map<std::string, std::pair<int, int>> rev_merge;
};
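// Rough walkthrough of the merge loop above (editorial, hypothetical scores): for the input
// "hello", the queue is seeded with the bigrams "he", "el", "ll", "lo"; if "ll" scores best it
// is merged first, after which try_add_bigram() offers "ell" and "llo" from the new neighbors,
// and so on until no adjacent pair is in the vocab; resegment() then emits ids, splitting any
// leftover symbol into byte tokens via llama_byte_to_token().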
// BPE tokenizer
// adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
// tried to simplify unicode stuff, so most likely does not work 100% correctly!

// TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused

struct llm_bigram_bpe {
    struct comparator {
        bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
            return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
        }
    };

    using queue_storage = std::vector<llm_bigram_bpe>;
    using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
    llm_symbol::index left;
    llm_symbol::index right;
    std::string text;
    int rank;
    size_t size;
};

struct llm_tokenizer_bpe {
    llm_tokenizer_bpe(const llama_vocab & vocab): vocab(vocab) {}

    void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
        int final_prev_index = -1;
        bool ignore_merges = false;

        std::vector<std::string> word_collection;
        switch (vocab.type) {
            case LLAMA_VOCAB_TYPE_BPE:
                switch (vocab.type_pre) {
                    case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
                        ignore_merges = true;
                        word_collection = unicode_regex_split(text, {
                            // original regex from tokenizer.json
                            //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",

                            // adapted: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2080233989
                            "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
                        });
                        break;
                    case LLAMA_VOCAB_PRE_TYPE_DBRX:
                        word_collection = unicode_regex_split(text, {
                            // same as llama3
                            "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
                        });
                        break;
                    case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM:
                        word_collection = unicode_regex_split(text, {
                            "[\r\n]",
                            "\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z𐐀-𐑏𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+",
                            "\\s?[!-/:-~!-/:-~‘-‟ -。]+",
                            "\\s+$",
                            "[一-龥ࠀ-一가-퟿]+",
                            "\\p{N}+",
                        });
                        break;
                    case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER:
                        word_collection = unicode_regex_split(text, {
                            "[\r\n]",
                            "\\s?\\p{L}+",
                            "\\s?\\p{P}+",
                            "[一-龥ࠀ-一가-퟿]+",
                            "\\p{N}",
                        });
                        break;
                    case LLAMA_VOCAB_PRE_TYPE_FALCON:
                        word_collection = unicode_regex_split(text, {
                            "[\\p{P}\\$\\+<=>\\^~\\|]+",
                            "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                            "[0-9][0-9][0-9]",
                        });
                        break;
                    case LLAMA_VOCAB_PRE_TYPE_MPT:
                        // TODO: MPT pre-tokenization regexes are unknown
                        //       the following are close, but not exact. run the following:
                        //       ./bin/test-tokenizer-0 ../models/ggml-vocab-mpt.gguf
                        // note: asserting on a string literal is always true and never fires,
                        //       so warn and fall back to the approximate regexes below
                        LLAMA_LOG_WARN("%s: MPT pre-tokenization regexes are unknown - using an approximation\n", __func__);
                        word_collection = unicode_regex_split(text, {
                            "\\s?\\p{L}+",
                            "\\s?\\p{P}+",
                            "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                        });
                        break;
                    case LLAMA_VOCAB_PRE_TYPE_STARCODER:
                    case LLAMA_VOCAB_PRE_TYPE_REFACT:
                    case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
                        word_collection = unicode_regex_split(text, {
                            "\\p{N}",
                            "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                        });
                        break;
                    case LLAMA_VOCAB_PRE_TYPE_GPT2:
                    case LLAMA_VOCAB_PRE_TYPE_OLMO:
                        word_collection = unicode_regex_split(text, {
                            "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                        });
                        break;
                    case LLAMA_VOCAB_PRE_TYPE_STABLELM2:
                    case LLAMA_VOCAB_PRE_TYPE_QWEN2:
                        word_collection = unicode_regex_split(text, {
                            // original regex from tokenizer.json
                            // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
                            "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
                        });
                        break;
                    default:
                        // default regex for BPE tokenization pre-processing
                        word_collection = unicode_regex_split(text, {
                            "[\\p{P}\\$\\+<=>\\^~\\|]+",
                            "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                            "\\p{N}+",
                            "[0-9][0-9][0-9]",
                        });
                        break;
                }
                break;
            default:
                GGML_ASSERT(false);
                break;
        }

        symbols_final.clear();

        for (auto & word : word_collection) {
            work_queue = llm_bigram_bpe::queue();
            symbols.clear();

            int index = 0;
            size_t offset = 0;

            if (ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) {
                symbols.emplace_back(llm_symbol{-1, -1, word.c_str(), word.size()});
                offset = word.size();
            }

            while (offset < word.size()) {
                llm_symbol sym;
                size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
                sym.text = word.c_str() + offset;
                sym.n = char_len;
                offset += sym.n;
                sym.prev = index - 1;
                sym.next = offset == word.size() ? -1 : index + 1;
                index++;
                symbols.emplace_back(sym);
            }
            for (size_t i = 1; i < symbols.size(); ++i) {
                add_new_bigram(i - 1, i);
            }

            // build token(s)
            while (!work_queue.empty()) {
                auto bigram = work_queue.top();
                work_queue.pop();

                auto & left_symbol = symbols[bigram.left];
                auto & right_symbol = symbols[bigram.right];

                if (left_symbol.n == 0 || right_symbol.n == 0) {
                    continue;
                }
                std::string left_token = std::string(left_symbol.text, left_symbol.n);
                std::string right_token = std::string(right_symbol.text, right_symbol.n);
                if (left_token + right_token != bigram.text) {
                    continue; // Skip this bigram if it's outdated
                }

                // merge the right sym into the left one
                left_symbol.n += right_symbol.n;
                right_symbol.n = 0;

                // remove the right sym from the chain
                left_symbol.next = right_symbol.next;
                if (right_symbol.next >= 0) {
                    symbols[right_symbol.next].prev = bigram.left;
                }

                add_new_bigram(left_symbol.prev, bigram.left);  // left side of current symbol
                add_new_bigram(bigram.left, left_symbol.next);  // right side of current symbol
            }

            // add the finished tokens to the final list keeping correct order for next and prev
            for (auto & sym : symbols) {
                if (sym.n > 0) {
                    sym.prev = final_prev_index;
                    sym.next = -1;
                    if (final_prev_index != -1) {
                        symbols_final[final_prev_index].next = symbols_final.size();
                    }
                    symbols_final.emplace_back(sym);
                    final_prev_index = symbols_final.size() - 1;
                }
            }
        }

        symbols = symbols_final;

        if (!symbols.empty()) {
            for (int i = 0; i != -1; i = symbols[i].next) {
                auto & symbol = symbols[i];
                if (symbol.n == 0) {
                    continue;
                }

                const std::string str = std::string(symbol.text, symbol.n);
                const auto token = vocab.token_to_id.find(str);

                if (token == vocab.token_to_id.end()) {
                    for (auto j = str.begin(); j != str.end(); ++j) {
                        std::string byte_str(1, *j);
                        auto token_multibyte = vocab.token_to_id.find(byte_str);
                        if (token_multibyte == vocab.token_to_id.end()) {
                            throw std::runtime_error("ERROR: byte not found in vocab");
                        }
                        output.push_back((*token_multibyte).second);
                    }
                } else {
                    output.push_back((*token).second);
                }
            }
        }
    }

private:
    void add_new_bigram(int left, int right) {
        if (left == -1 || right == -1) {
            return;
        }

        std::string left_token = std::string(symbols[left].text, symbols[left].n);
        std::string right_token = std::string(symbols[right].text, symbols[right].n);

        int rank_found = -1;

        rank_found = vocab.find_bpe_rank(left_token, right_token);

        if (rank_found < 0) {
            return;
        }

        llm_bigram_bpe bigram;

        bigram.left  = left;
        bigram.right = right;
        bigram.text  = left_token + right_token;
        bigram.size  = left_token.size() + right_token.size();
        bigram.rank  = rank_found;

        work_queue.push(bigram);
    }

    const llama_vocab & vocab;

    std::vector<llm_symbol> symbols;
    std::vector<llm_symbol> symbols_final;

    llm_bigram_bpe::queue work_queue;
};
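// Rough walkthrough of the BPE loop above (editorial, hypothetical merge table): each word from
// the pre-tokenizer regex split starts as single characters; add_new_bigram() assigns each
// adjacent pair its merge rank via vocab.find_bpe_rank(), the lowest-rank pair is merged first,
// new neighbor pairs are re-queued, and the surviving symbols are looked up in token_to_id,
// falling back to one token per byte when a symbol is not in the vocab.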
struct llm_tokenizer_wpm {
    llm_tokenizer_wpm(const llama_vocab & vocab): vocab(vocab) {}

    void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
        auto * token_map = &vocab.token_to_id;

        // normalize and split by whitespace
        std::vector<std::string> words = preprocess(text);

        // bos token prepended already

        // find the longest tokens that form the words
        for (const std::string & word : words) {
            // skip empty words
            if (word.size() == 0) {
                continue;
            }

            // prepend phantom space
            std::string word1 = "\xe2\x96\x81" + word;
            int n = word1.size();

            // we're at the start of a new word
            int i = 0;
            bool match_any = false;

            // move through character position in word
            while (i < n) {
                // loop through possible match length
                bool match = false;
                for (int j = n; j > i; j--) {
                    auto it = token_map->find(word1.substr(i, j - i));
                    if (it != token_map->end()) {
                        output.push_back(it->second);
                        match = true;
                        match_any = true;
                        i = j;
                        break;
                    }
                }

                // must be an unknown character
                if (!match) {
                    i++;
                }
            }

            // we didn't find any matches for this word
            if (!match_any) {
                output.push_back(vocab.special_unk_id);
            }
        }
    }

    std::vector<std::string> preprocess(const std::string & text) {
        std::vector<uint32_t> cpts_nfd = unicode_cpts_normalize_nfd(unicode_cpts_from_utf8(text));

        // strip accents, strip control, normalize whitespace,
        // to lowercase, pad chinese characters, pad punctuation
        std::string new_str = "";
        for (uint32_t code : cpts_nfd) {
            const codepoint_flags flags = unicode_cpt_flags(code);
            if (flags.is_accent_mark || flags.is_control) {
                continue;
            }
            code = unicode_tolower(code);
            if (flags.is_separator || flags.is_whitespace) { //####FIXME: is_separator ?
                code = ' ';
            }
            std::string s = unicode_cpt_to_utf8(code);
            if (flags.is_punctuation || is_ascii_punct(code) || is_chinese_char(code)) {
                new_str += " ";
                new_str += s;
                new_str += " ";
            } else {
                new_str += s;
            }
        }

        // split by whitespace
        uint64_t l = 0;
        uint64_t r = 0;
        std::vector<std::string> words;
        while (r < new_str.size()) {
            // if is whitespace
            if (isspace(new_str[r], std::locale::classic())) {
                if (r > l) words.push_back(new_str.substr(l, (r - l)));
                l = r + 1;
                r = l;
            } else {
                r += 1;
            }
        }
        if (r > l) {
            words.push_back(new_str.substr(l, (r - l)));
        }
        return words;
    }

    bool is_ascii_punct(uint32_t code) {
        if (code > 0xFF) {
            return false;
        }
        auto c = char(static_cast<unsigned char>(code));
        return ispunct(c, std::locale::classic());
    }

    bool is_chinese_char(uint32_t cpt) {
        if ((cpt >= 0x4E00  && cpt <= 0x9FFF)  ||
            (cpt >= 0x3400  && cpt <= 0x4DBF)  ||
            (cpt >= 0x20000 && cpt <= 0x2A6DF) ||
            (cpt >= 0x2A700 && cpt <= 0x2B73F) ||
            (cpt >= 0x2B740 && cpt <= 0x2B81F) ||
            (cpt >= 0x2B920 && cpt <= 0x2CEAF) || // this should be 0x2B820 but in hf rust code it is 0x2B920
            (cpt >= 0xF900  && cpt <= 0xFAFF)  ||
            (cpt >= 0x2F800 && cpt <= 0x2FA1F) ||
            (cpt >= 0x3000  && cpt <= 0x303F)  ||
            (cpt >= 0xFF00  && cpt <= 0xFFEF)) {
            return true; // NOLINT
        }
        return false;
    }

    const llama_vocab & vocab;
};
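// Illustration (editorial, hypothetical vocab): for the word "unaffable", the greedy loop above
// first tries the whole "▁unaffable", then ever-shorter prefixes until one matches (say "▁un"),
// then restarts the longest-match search at the remainder; if nothing in the word ever matches,
// special_unk_id is emitted once for the whole word.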
typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
    FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
    FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
} FRAGMENT_BUFFER_VARIANT_TYPE;

struct fragment_buffer_variant {
    fragment_buffer_variant(llama_vocab::id _token)
        :
        type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
        token(_token),
        raw_text(_dummy),
        offset(0),
        length(0) {}

    fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
        :
        type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
        token((llama_vocab::id) - 1),
        raw_text(_raw_text),
        offset(_offset),
        length(_length) {
            GGML_ASSERT(_offset >= 0);
            GGML_ASSERT(_length >= 1);
            GGML_ASSERT(offset + length <= raw_text.length());
        }

    const FRAGMENT_BUFFER_VARIANT_TYPE type;
    const llama_vocab::id token;
    const std::string _dummy;
    const std::string & raw_text;
    const uint64_t offset;
    const uint64_t length;
};
// #define PRETOKENIZERDEBUG

static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
    // for each special token
    for (const auto & st: vocab.special_tokens_cache) {
        const auto & special_token = st.first;
        const auto & special_id    = st.second;

        // for each text fragment
        std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
        while (it != buffer.end()) {
            auto & fragment = (*it);

            // if a fragment is text (not yet processed)
            if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                auto * raw_text = &(fragment.raw_text);

                auto raw_text_base_offset = fragment.offset;
                auto raw_text_base_length = fragment.length;

                // loop over the text
                while (true) {
                    // find the first occurrence of a given special token in this fragment
                    // passing the offset argument only limits the "search area", but the match
                    // coordinates are still relative to the source full raw_text
                    auto match = raw_text->find(special_token, raw_text_base_offset);

                    // no occurrences found, stop processing this fragment for a given special token
                    if (match == std::string::npos) break;

                    // check if match is within bounds of offset <-> length
                    if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;

#ifdef PRETOKENIZERDEBUG
                    LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
#endif
                    auto source = std::distance(buffer.begin(), it);

                    // if match is further than base offset
                    // then we have some text to the left of it
                    if (match > raw_text_base_offset) {
                        // left
                        const int64_t left_remainder_offset = raw_text_base_offset + 0;
                        const int64_t left_remainder_length = match - raw_text_base_offset;
                        buffer.emplace_after(it, (*raw_text), left_remainder_offset, left_remainder_length);

#ifdef PRETOKENIZERDEBUG
                        LLAMA_LOG_WARN("FL: (%ld %ld) '%s'\n", left_remainder_offset, left_remainder_length, raw_text->substr(left_remainder_offset, left_remainder_length).c_str());
#endif
                        it++;
                    }

                    // special token
                    buffer.emplace_after(it, special_id);
                    it++;

                    // right
                    if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
                        const int64_t right_remainder_offset = match + special_token.length();
                        const int64_t right_remainder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
                        buffer.emplace_after(it, (*raw_text), right_remainder_offset, right_remainder_length);

#ifdef PRETOKENIZERDEBUG
                        LLAMA_LOG_WARN("FR: (%ld %ld) '%s'\n", right_remainder_offset, right_remainder_length, raw_text->substr(right_remainder_offset, right_remainder_length).c_str());
#endif
                        it++;

                        if (source == 0) {
                            buffer.erase_after(buffer.before_begin());
                        } else {
                            buffer.erase_after(std::next(buffer.begin(), (source-1)));
                        }

                        // repeat for the right side
                        raw_text_base_offset = right_remainder_offset;
                        raw_text_base_length = right_remainder_length;

#ifdef PRETOKENIZERDEBUG
                        LLAMA_LOG_WARN("RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
#endif
                    } else {
                        if (source == 0) {
                            buffer.erase_after(buffer.before_begin());
                        } else {
                            buffer.erase_after(std::next(buffer.begin(), (source-1)));
                        }
                        break;
                    }
                }
            }
            it++;
        }
    }
}
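// Worked example (editorial, illustrative token): with the special token "<eot>" and a buffer
// holding the single fragment RAW_TEXT("Hi<eot>there"), the partitioning above rewrites it as
//   RAW_TEXT("Hi"), TOKEN(id of "<eot>"), RAW_TEXT("there")
// and keeps scanning the right remainder for further occurrences before moving on.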
static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool add_special, bool parse_special) {
    std::vector<llama_vocab::id> output;
    std::forward_list<fragment_buffer_variant> fragment_buffer;

    if (!raw_text.empty()) {
        fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
        if (parse_special) tokenizer_st_partition(vocab, fragment_buffer);
    }

    switch (vocab.type) {
        case LLAMA_VOCAB_TYPE_SPM:
            {
                // OG tokenizer behavior:
                //
                // tokenizer.encode('', add_special_tokens=True) returns [1]
                // tokenizer.encode('', add_special_tokens=False) returns []

                static const bool rtrim = true;  //TODO: as param
                bool is_prev_special = false;
                bool special_token_rtrim = false;

                if (add_special && vocab.special_add_bos != 0) {
                    GGML_ASSERT(vocab.special_bos_id != -1);
                    output.push_back(vocab.special_bos_id);
                    is_prev_special = true;
                }

                for (const auto & fragment : fragment_buffer) {
                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                        // without adding this leading whitespace, we do not get the same results as the original tokenizer

                        // TODO: It's likely possible to get rid of this string copy entirely
                        //  by modifying llm_tokenizer_x to operate with string offsets like pre-tokenizer
                        //  and passing 'add space prefix' as bool argument
                        //
                        auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);

                        if (special_token_rtrim) {
                            size_t num_whitespaces = 0;
                            while (num_whitespaces < raw_text.size() && isspace(static_cast<unsigned char>(raw_text[num_whitespaces]))) {
                                num_whitespaces++;
                            }
                            if (num_whitespaces == raw_text.size()) {
                                continue; // skip if all whitespace
                            }
                            raw_text = raw_text.substr(num_whitespaces);
                        }

                        if (vocab.add_space_prefix) {
                            if (!output.size() || is_prev_special) {  // prefix with space if first token
                                raw_text = " " + raw_text;
                            }
                        }

#ifdef PRETOKENIZERDEBUG
                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
#endif
                        llm_tokenizer_spm tokenizer(vocab);
                        llama_escape_whitespace(raw_text);
                        tokenizer.tokenize(raw_text, output);
                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
                        output.push_back(fragment.token);
                        is_prev_special = true;
                        // phi-3 special tokens without rtrim, works fine for llama-spm too
                        special_token_rtrim = rtrim
                            && fragment.token != vocab.special_bos_id
                            && fragment.token != vocab.special_unk_id
                            && fragment.token != vocab.special_eos_id;
                    }
                }

                if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) {
                    LLAMA_LOG_WARN(
                        "%s: Added a BOS token to the prompt as specified by the model, but the prompt "
                        "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
                        "Are you sure this is what you want?\n", __FUNCTION__);
                }

                if (add_special && vocab.special_add_eos == 1) {
                    GGML_ASSERT(vocab.special_eos_id != -1);
                    output.push_back(vocab.special_eos_id);
                }
            } break;
        case LLAMA_VOCAB_TYPE_BPE:
            {
                if (add_special && vocab.special_add_bos != 0) {
                    GGML_ASSERT(vocab.special_bos_id != -1);
                    output.push_back(vocab.special_bos_id);
                }

                for (const auto & fragment : fragment_buffer) {
                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                        auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);

#ifdef PRETOKENIZERDEBUG
                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
#endif
                        llm_tokenizer_bpe tokenizer(vocab);
                        tokenizer.tokenize(raw_text, output);
                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
                        output.push_back(fragment.token);
                    }
                }

                if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) {
                    LLAMA_LOG_WARN(
                        "%s: Added a BOS token to the prompt as specified by the model, but the prompt "
                        "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
                        "Are you sure this is what you want?\n", __FUNCTION__);
                }

                if (add_special && vocab.special_add_eos == 1) {
                    GGML_ASSERT(vocab.special_eos_id != -1);
                    output.push_back(vocab.special_eos_id);
                }
            } break;
        case LLAMA_VOCAB_TYPE_WPM:
            {
                if (add_special) {
                    GGML_ASSERT(vocab.special_cls_id != -1);
                    output.push_back(vocab.special_cls_id);
                }

                for (const auto & fragment : fragment_buffer) {
                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                        auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);

#ifdef PRETOKENIZERDEBUG
                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
#endif
                        llm_tokenizer_wpm tokenizer(vocab);
                        tokenizer.tokenize(raw_text, output);
                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
                        output.push_back(fragment.token);
                    }
                }

                if (add_special) {
                    GGML_ASSERT(vocab.special_sep_id != -1);
                    output.push_back(vocab.special_sep_id);
                }
            } break;
        case LLAMA_VOCAB_TYPE_NONE:
            GGML_ASSERT(false);
    }

    return output;
}
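// usage sketch (editorial, illustrative call site): given a loaded model's vocab, a call such as
//   std::vector<llama_vocab::id> ids = llama_tokenize_internal(vocab, "Hello world", /*add_special=*/true, /*parse_special=*/true);
// yields [BOS, ...text tokens...] for an SPM/BPE vocab with special_add_bos enabled, with any
// special tokens appearing literally in the text mapped to their ids by tokenizer_st_partition.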
//
// grammar - internal
//

// Decodes a UTF-8 string which may end in an incomplete sequence. Appends a terminating 0 so the
// result can be scanned with a pointer. If an invalid sequence is encountered, returns
// `llama_partial_utf8.n_remain == -1`.
std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
        const std::string & src,
        llama_partial_utf8 partial_start) {
    static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
    const char * pos = src.c_str();
    std::vector<uint32_t> code_points;

    // common english strings have the same number of codepoints and bytes. `+ 1` for the terminating 0.
    code_points.reserve(src.size() + 1);
    uint32_t value = partial_start.value;
    int n_remain = partial_start.n_remain;

    // continue previous decode, if applicable
    while (*pos != 0 && n_remain > 0) {
        uint8_t next_byte = static_cast<uint8_t>(*pos);
        if ((next_byte >> 6) != 2) {
            // invalid sequence, abort
            code_points.push_back(0);
            return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
        }
        value = (value << 6) + (next_byte & 0x3F);
        ++pos;
        --n_remain;
    }

    if (partial_start.n_remain > 0 && n_remain == 0) {
        code_points.push_back(value);
    }

    // decode any subsequent utf-8 sequences, which may end in an incomplete one
    while (*pos != 0) {
        uint8_t first_byte = static_cast<uint8_t>(*pos);
        uint8_t highbits = first_byte >> 4;
        n_remain = lookup[highbits] - 1;

        if (n_remain < 0) {
            // invalid sequence, abort
            code_points.clear();
            code_points.push_back(0);
            return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
        }

        uint8_t mask = (1 << (7 - n_remain)) - 1;
        value = first_byte & mask;

        ++pos;
        while (*pos != 0 && n_remain > 0) {
            value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
            ++pos;
            --n_remain;
        }
        if (n_remain == 0) {
            code_points.push_back(value);
        }
    }
    code_points.push_back(0);

    return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
}
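// Worked example (editorial): for the split sequence "\xe2\x96" + "\x81" (UTF-8 for U+2581 "▁"):
// first call: highbits 0xE -> lookup says 3 bytes total (n_remain = 2), mask = 0x1F, so
// value = (0xE2 & 0x1F) = 2, then continuation byte 0x96 -> value = (2 << 6) | 0x16 = 150 with
// n_remain = 1; the second call resumes from {150, 1}: byte 0x81 -> value = (150 << 6) | 0x01
// = 0x2581, n_remain = 0, and the codepoint is emitted.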
// returns true iff pos points to the end of one of the definitions of a rule
static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
    switch (pos->type) {
        case LLAMA_GRETYPE_END: return true; // NOLINT
        case LLAMA_GRETYPE_ALT: return true; // NOLINT
        default:                return false;
    }
}

// returns true iff chr satisfies the char range at pos (regular or inverse range)
// asserts that pos is pointing to a char range element
static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
        const llama_grammar_element * pos,
        const uint32_t chr) {
    bool found = false;
    bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;

    GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT

    do {
        if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
            // inclusive range, e.g. [a-z]
            found = found || (pos->value <= chr && chr <= pos[1].value);
            pos += 2;
        } else {
            // exact char match, e.g. [a] or "a"
            found = found || pos->value == chr;
            pos += 1;
        }
    } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);

    return std::make_pair(found == is_positive_char, pos);
}

// returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
// range at pos (regular or inverse range)
// asserts that pos is pointing to a char range element
static bool llama_grammar_match_partial_char(
        const llama_grammar_element * pos,
        const llama_partial_utf8 partial_utf8) {
    bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
    GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);

    uint32_t partial_value = partial_utf8.value;
    int n_remain = partial_utf8.n_remain;

    // invalid sequence or 7-bit char split across 2 bytes (overlong)
    if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
        return false;
    }

    // range of possible code points this partial UTF-8 sequence could complete to
    uint32_t low = partial_value << (n_remain * 6);
    uint32_t high = low | ((1 << (n_remain * 6)) - 1);

    if (low == 0) {
        if (n_remain == 2) {
            low = 1 << 11;
        } else if (n_remain == 3) {
            low = 1 << 16;
        }
    }

    do {
        if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
            // inclusive range, e.g. [a-z]
            if (pos->value <= high && low <= pos[1].value) {
                return is_positive_char;
            }
            pos += 2;
        } else {
            // exact char match, e.g. [a] or "a"
            if (low <= pos->value && pos->value <= high) {
                return is_positive_char;
            }
            pos += 1;
        }
    } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);

    return !is_positive_char;
}
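// Example (editorial): the partial state {value = 2, n_remain = 2} (lead byte 0xE2 of a 3-byte
// sequence) can only complete to codepoints in [2 << 12, (2 << 12) | 0xFFF] = [0x2000, 0x2FFF],
// so a positive char range matches iff it overlaps that interval, and a negated range matches
// iff some codepoint in the interval falls outside it.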
// transforms a grammar pushdown stack into N possible stacks, all ending
// at a character range (terminal element)
static void llama_grammar_advance_stack(
        const std::vector<std::vector<llama_grammar_element>> & rules,
        const std::vector<const llama_grammar_element *> & stack,
        std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
    if (stack.empty()) {
        if (std::find(new_stacks.begin(), new_stacks.end(), stack) == new_stacks.end()) {
            new_stacks.emplace_back(stack);
        }
        return;
    }

    const llama_grammar_element * pos = stack.back();

    switch (pos->type) {
        case LLAMA_GRETYPE_RULE_REF: {
            const size_t rule_id = static_cast<size_t>(pos->value);
            const llama_grammar_element * subpos = rules[rule_id].data();
            do {
                // init new stack without the top (pos)
                std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
                if (!llama_grammar_is_end_of_sequence(pos + 1)) {
                    // if this rule ref is followed by another element, add that to stack
                    new_stack.push_back(pos + 1);
                }
                if (!llama_grammar_is_end_of_sequence(subpos)) {
                    // if alternate is nonempty, add to stack
                    new_stack.push_back(subpos);
                }
                llama_grammar_advance_stack(rules, new_stack, new_stacks);
                while (!llama_grammar_is_end_of_sequence(subpos)) {
                    // scan to end of alternate def
                    subpos++;
                }
                if (subpos->type == LLAMA_GRETYPE_ALT) {
                    // there's another alternate def of this rule to process
                    subpos++;
                } else {
                    break;
                }
            } while (true);
            break;
        }
        case LLAMA_GRETYPE_CHAR:
        case LLAMA_GRETYPE_CHAR_NOT:
            if (std::find(new_stacks.begin(), new_stacks.end(), stack) == new_stacks.end()) {
                // only add the stack if it's not a duplicate of one we already have
                new_stacks.emplace_back(stack);
            }
            break;
        default:
            // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
            // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
            // those
            GGML_ASSERT(false);
    }
}
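// Example (editorial): for a rule `root ::= "a" | "b" root`, starting from a stack positioned at
// the root rule reference this produces two stacks, one pointing at the terminal "a" and one at
// the terminal "b", so every stack handed to llama_grammar_accept ends at a char element.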
// takes a set of possible pushdown stacks on a grammar, which are required to
// be positioned at a character range (see `llama_grammar_advance_stack`), and
// produces the N possible stacks if the given char is accepted at those
// positions
void llama_grammar_accept(
        const std::vector<std::vector<llama_grammar_element>> & rules,
        const std::vector<std::vector<const llama_grammar_element *>> & stacks,
        const uint32_t chr,
        std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
    new_stacks.clear();

    for (const auto & stack : stacks) {
        if (stack.empty()) {
            continue;
        }

        auto match = llama_grammar_match_char(stack.back(), chr);
        if (match.first) {
            const llama_grammar_element * pos = match.second;

            // update top of stack to next element, if any
            std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
            if (!llama_grammar_is_end_of_sequence(pos)) {
                new_stack.push_back(pos);
            }
            llama_grammar_advance_stack(rules, new_stack, new_stacks);
        }
    }
}

static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
        const std::vector<std::vector<llama_grammar_element>> & rules,
        const std::vector<std::vector<const llama_grammar_element *>> & stacks,
        const std::vector<llama_grammar_candidate> & candidates);

static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
        const std::vector<std::vector<llama_grammar_element>> & rules,
        const std::vector<const llama_grammar_element *> & stack,
        const std::vector<llama_grammar_candidate> & candidates) {

    std::vector<llama_grammar_candidate> rejects;
    rejects.reserve(candidates.size());

    if (stack.empty()) {
        for (const auto & tok : candidates) {
            if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
                rejects.push_back(tok);
            }
        }
        return rejects;
    }

    const llama_grammar_element * stack_pos = stack.back();

    std::vector<llama_grammar_candidate> next_candidates;
    next_candidates.reserve(candidates.size());

    for (const auto & tok : candidates) {
        if (*tok.code_points == 0) {
            // reached end of full codepoints in token, reject iff it ended in a partial sequence
            // that cannot satisfy this position in grammar
            if (tok.partial_utf8.n_remain != 0 &&
                    !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
                rejects.push_back(tok);
            }
        } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
            next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
        } else {
            rejects.push_back(tok);
        }
    }

    const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;

    // update top of stack to next element, if any
    std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
    if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
        stack_after.push_back(stack_pos_after);
    }
    std::vector<std::vector<const llama_grammar_element *>> next_stacks;
    llama_grammar_advance_stack(rules, stack_after, next_stacks);

    auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
    for (const auto & tok : next_rejects) {
        rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
    }

    return rejects;
}

static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
        const std::vector<std::vector<llama_grammar_element>> & rules,
        const std::vector<std::vector<const llama_grammar_element *>> & stacks,
        const std::vector<llama_grammar_candidate> & candidates) {
    GGML_ASSERT(!stacks.empty()); // REVIEW

    if (candidates.empty()) {
        return std::vector<llama_grammar_candidate>();
    }

    auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);

    for (size_t i = 1, size = stacks.size(); i < size; ++i) {
        rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
    }
    return rejects;
}

static bool llama_grammar_detect_left_recursion(
        const std::vector<std::vector<llama_grammar_element>> & rules,
        size_t rule_index,
        std::vector<bool> * rules_visited,
        std::vector<bool> * rules_in_progress,
        std::vector<bool> * rules_may_be_empty) {
    if ((*rules_in_progress)[rule_index]) {
        return true;
    }

    (*rules_in_progress)[rule_index] = true;

    const std::vector<llama_grammar_element> & rule = rules[rule_index];

    // First check if the rule might produce the empty string. This could be combined with the
    // second step, but it's more readable as two steps.
    bool at_rule_start = true;
    for (size_t i = 0; i < rule.size(); i++) {
        if (llama_grammar_is_end_of_sequence(&rule[i])) {
            if (at_rule_start) {
                (*rules_may_be_empty)[rule_index] = true;
                break;
            }
            at_rule_start = true;
        } else {
            at_rule_start = false;
        }
    }

    // Second, recurse into leftmost nonterminals (or next-leftmost as long as the previous
    // nonterminal may be empty)
    bool recurse_into_nonterminal = true;
    for (size_t i = 0; i < rule.size(); i++) {
        if (rule[i].type == LLAMA_GRETYPE_RULE_REF && recurse_into_nonterminal) {
            if (llama_grammar_detect_left_recursion(rules, (size_t)rule[i].value, rules_visited, rules_in_progress, rules_may_be_empty)) {
                return true;
            }
            if (!((*rules_may_be_empty)[(size_t)rule[i].value])) {
                recurse_into_nonterminal = false;
            }
        } else if (llama_grammar_is_end_of_sequence(&rule[i])) {
            recurse_into_nonterminal = true;
        } else {
            recurse_into_nonterminal = false;
        }
    }

    (*rules_in_progress)[rule_index] = false;
    (*rules_visited)[rule_index] = true;
    return false;
}
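// Example (editorial, GBNF-style notation): `a ::= a "x" | "y"` is rejected (the leftmost
// reference of the first alternate is `a` itself), as is the indirect form where `a` refers to
// `b` and `b` back to `a`; the right-recursive `a ::= "x" a | "y"` is fine. The may-be-empty
// pass matters because in `a ::= e a "x"` with `e ::= ""`, the `a` after `e` is still
// effectively leftmost.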
//
// grammar - external
//

struct llama_grammar * llama_grammar_init(
        const llama_grammar_element ** rules,
        size_t n_rules,
        size_t start_rule_index) {
    const llama_grammar_element * pos;

    // copy rule definitions into vectors
    std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
    for (size_t i = 0; i < n_rules; i++) {
        for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
            vec_rules[i].push_back(*pos);
        }
        vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
    }

    // Check for left recursion
    std::vector<bool> rules_visited(n_rules);
    std::vector<bool> rules_in_progress(n_rules);
    std::vector<bool> rules_may_be_empty(n_rules);
    for (size_t i = 0; i < n_rules; i++) {
        if (rules_visited[i]) {
            continue;
        }
        if (llama_grammar_detect_left_recursion(vec_rules, i, &rules_visited, &rules_in_progress, &rules_may_be_empty)) {
            throw std::runtime_error(format("unsupported grammar, left recursion detected for nonterminal at index %zu", i));
        }
    }

    // loop over alternates of start rule to build initial stacks
    std::vector<std::vector<const llama_grammar_element *>> stacks;
    pos = vec_rules[start_rule_index].data();
    do {
        std::vector<const llama_grammar_element *> stack;
        if (!llama_grammar_is_end_of_sequence(pos)) {
            // if alternate is nonempty, add to stack
            stack.push_back(pos);
        }
        llama_grammar_advance_stack(vec_rules, stack, stacks);
        while (!llama_grammar_is_end_of_sequence(pos)) {
            // scan to end of alternate def
            pos++;
        }
        if (pos->type == LLAMA_GRETYPE_ALT) {
            // there's another alternate def of this rule to process
            pos++;
        } else {
            break;
        }
    } while (true);

    // Important: vec_rules has to be moved here, not copied, because stacks contains
    // pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
    // then the pointers would be invalidated when the local vec_rules goes out of scope.
    return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
}
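// Note (editorial): each rules[i] is a flat array of llama_grammar_element terminated by
// LLAMA_GRETYPE_END, with alternates separated by LLAMA_GRETYPE_ALT - typically produced from
// GBNF text by the grammar parser shipped alongside this library before being handed to
// llama_grammar_init.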
void llama_grammar_free(struct llama_grammar * grammar) {
    delete grammar;
}

struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
    llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };

    // redirect elements in stacks to point to new rules
    for (size_t is = 0; is < result->stacks.size(); is++) {
        for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
            for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
                for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
                    if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
                        result->stacks[is][ie] = &result->rules[ir0][ir1];
                    }
                }
            }
        }
    }

    return result;
}

//
// sampling
//

void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
    if (seed == LLAMA_DEFAULT_SEED) {
        seed = time(NULL);
    }
    ctx->rng.seed(seed);
}

void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
    GGML_ASSERT(candidates->size > 0);

    const int64_t t_start_sample_us = ggml_time_us();

    // Sort the logits in descending order
    if (!candidates->sorted) {
        std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
            return a.logit > b.logit;
        });
        candidates->sorted = true;
    }

    float max_l = candidates->data[0].logit;
    float cum_sum = 0.0f;
    for (size_t i = 0; i < candidates->size; ++i) {
        float p = expf(candidates->data[i].logit - max_l);
        candidates->data[i].p = p;
        cum_sum += p;
    }
    for (size_t i = 0; i < candidates->size; ++i) {
        candidates->data[i].p /= cum_sum;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
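// Worked example (editorial): for logits {2, 1, 0}, exp(l - max_l) = {1.000, 0.368, 0.135},
// cum_sum = 1.503, so p = {0.665, 0.245, 0.090}; subtracting max_l first keeps expf() in range.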
void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int32_t k, size_t min_keep) {
    // TODO: move bucket sort to a separate function so that top_p/tail_free/typical/softmax first is equally fast
    // if (k >= (int32_t)candidates->size) {
    //     return;
    // }

    const int64_t t_start_sample_us = ggml_time_us();

    if (k <= 0) {
        k = candidates->size;
    }

    k = std::max(k, (int) min_keep);
    k = std::min(k, (int) candidates->size);

    // Sort scores in descending order
    if (!candidates->sorted) {
        auto comp = [](const llama_token_data & a, const llama_token_data & b) {
            return a.logit > b.logit;
        };
        if (k <= 128) {
            std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
        } else {
            constexpr int   nbuckets     = 128;
            constexpr float bucket_low   = -10.0f;
            constexpr float bucket_high  =  10.0f;
            constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low);
            constexpr float bucket_inter = -bucket_low * bucket_scale;

            std::vector<int> bucket_idx(candidates->size);
            std::vector<int> histo(nbuckets, 0);

            for (int i = 0; i < (int)candidates->size; ++i) {
                const float val = candidates->data[i].logit;
                int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
                ib = std::max(0, std::min(nbuckets-1, ib));
                bucket_idx[i] = ib;
                ++histo[ib];
            }
            int nhave = 0;
            int ib = nbuckets - 1;
            for ( ; ib >= 0; --ib) {
                nhave += histo[ib];
                if (nhave >= k) break;
            }
            std::vector<llama_token_data> tmp_tokens(nhave);
            auto ptr = tmp_tokens.data();
            std::vector<llama_token_data*> bucket_ptrs;
            bucket_ptrs.reserve(nbuckets - ib);
            for (int j = nbuckets - 1; j >= ib; --j) {
                bucket_ptrs.push_back(ptr);
                ptr += histo[j];
            }
            for (int i = 0; i < (int)candidates->size; ++i) {
                int j = bucket_idx[i];
                if (j >= ib) {
                    *bucket_ptrs[nbuckets-1-j]++ = candidates->data[i];
                }
            }

            ptr = tmp_tokens.data();
            int ndone = 0;
            for (int j = nbuckets-1; j > ib; --j) {
                std::sort(ptr, ptr + histo[j], comp);
                ptr += histo[j];
                ndone += histo[j];
            }
            std::partial_sort(ptr, ptr + k - ndone, ptr + histo[ib], comp);

            std::memcpy(candidates->data, tmp_tokens.data(), k*sizeof(llama_token_data));
        }
        candidates->sorted = true;
    }
    candidates->size = k;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
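
// Note on the bucket sort above (explanatory comment, not library code): for k > 128
// the logits are binned into 128 equal-width buckets over [-10, 10], counted with a
// histogram, and only the top buckets that can contain the k best entries are
// actually sorted. With bucket_scale = 128/20 = 6.4 and bucket_inter = 64, a logit
// of 2.5 lands in bucket int(6.4 * 2.5 + 64) = 80. This avoids a full sort of the
// whole vocabulary when k is large.
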
void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
    if (p >= 1.0f) {
        return;
    }

    llama_sample_softmax(ctx, candidates);

    const int64_t t_start_sample_us = ggml_time_us();

    // Compute the cumulative probabilities
    float cum_sum = 0.0f;
    size_t last_idx = candidates->size;

    for (size_t i = 0; i < candidates->size; ++i) {
        cum_sum += candidates->data[i].p;

        // Check if the running sum is at least p and we have kept at least min_keep tokens;
        // we set the last index to i+1 to indicate that the current iterate should be included in the set
        if (cum_sum >= p && i + 1 >= min_keep) {
            last_idx = i + 1;
            break;
        }
    }

    // Resize the output vector to keep only the top-p tokens
    candidates->size = last_idx;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
    if (p <= 0.0f || !candidates->size) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    bool min_p_applied = false;

    // if the candidates aren't sorted, try the unsorted implementation first
    if (!candidates->sorted) {
        std::vector<llama_token_data> filtered_tokens;

        float max_logit = -FLT_MAX;
        for (size_t i = 0; i < candidates->size; ++i) {
            max_logit = std::max(max_logit, candidates->data[i].logit);
        }
        const float min_logit = max_logit + logf(p); // min logit for p_i >= p * p_max

        for (size_t i = 0; i < candidates->size; ++i) {
            if (candidates->data[i].logit >= min_logit) {
                filtered_tokens.push_back(candidates->data[i]);
            }
        }

        // if we have enough values the operation was a success
        if (filtered_tokens.size() >= min_keep) {
            memcpy(candidates->data, filtered_tokens.data(), filtered_tokens.size()*sizeof(llama_token_data));
            candidates->size = filtered_tokens.size();
            min_p_applied = true;
        }
    }

    // if the candidates are sorted or the unsorted implementation failed, use this implementation
    if (!min_p_applied) {
        // Sort the logits in descending order
        if (!candidates->sorted) {
            std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
                return a.logit > b.logit;
            });
            candidates->sorted = true;
        }

        const float min_logit = candidates->data[0].logit + logf(p); // min logit for p_i >= p * p_max
        size_t i = 1; // first token always matches

        for (; i < candidates->size; ++i) {
            if (candidates->data[i].logit < min_logit && i >= min_keep) {
                break; // prob too small
            }
        }

        // Resize the output vector to keep only the matching tokens
        candidates->size = i;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
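
// Derivation of the threshold used above (explanatory comment): min-p keeps token i
// iff p_i >= p * p_max. Since softmax is monotone in the logits,
//     p_i / p_max == exp(logit_i - logit_max),
// so the test is equivalent to logit_i >= logit_max + log(p), which is why the
// filter can run on raw logits without computing the softmax at all.
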
void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
    if (z >= 1.0f || candidates->size <= 2) {
        return;
    }

    llama_sample_softmax(nullptr, candidates);
    const int64_t t_start_sample_us = ggml_time_us();

    // Compute the first and second derivatives
    std::vector<float> first_derivatives(candidates->size - 1);
    std::vector<float> second_derivatives(candidates->size - 2);

    for (size_t i = 0; i < first_derivatives.size(); ++i) {
        first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
    }
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
    }

    // Calculate absolute value of second derivatives
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        second_derivatives[i] = std::abs(second_derivatives[i]);
    }

    // Normalize the second derivatives
    {
        const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);

        if (second_derivatives_sum > 1e-6f) {
            for (float & value : second_derivatives) {
                value /= second_derivatives_sum;
            }
        } else {
            for (float & value : second_derivatives) {
                value = 1.0f / second_derivatives.size();
            }
        }
    }

    float cum_sum = 0.0f;
    size_t last_idx = candidates->size;
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        cum_sum += second_derivatives[i];

        // Check if the running sum is greater than z and we have kept at least min_keep tokens
        if (cum_sum > z && i >= min_keep) {
            last_idx = i;
            break;
        }
    }

    // Resize the output vector to keep only the tokens above the tail location
    candidates->size = last_idx;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
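
// Note (explanatory comment): the "derivatives" above are discrete finite
// differences of the sorted probability curve. Tail-free sampling treats the
// normalized |second derivative| mass as a signal for where the distribution's
// flat "tail" begins, and cuts the candidate list at the point where the
// cumulative sum of that mass exceeds z.
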
void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
    // Reference implementation:
    // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
    if (p >= 1.0f) {
        return;
    }

    // Compute the softmax of logits and calculate entropy
    llama_sample_softmax(nullptr, candidates);

    const int64_t t_start_sample_us = ggml_time_us();

    float entropy = 0.0f;
    for (size_t i = 0; i < candidates->size; ++i) {
        entropy += -candidates->data[i].p * logf(candidates->data[i].p);
    }

    // Compute the absolute difference between negative log probability and entropy for each candidate
    std::vector<float> shifted_scores;
    for (size_t i = 0; i < candidates->size; ++i) {
        float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
        shifted_scores.push_back(shifted_score);
    }

    // Sort tokens based on the shifted_scores and their corresponding indices
    std::vector<size_t> indices(candidates->size);
    std::iota(indices.begin(), indices.end(), 0);

    std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
        return shifted_scores[a] < shifted_scores[b];
    });

    // Compute the cumulative probabilities
    float cum_sum = 0.0f;
    size_t last_idx = indices.size();

    for (size_t i = 0; i < indices.size(); ++i) {
        size_t idx = indices[i];
        cum_sum += candidates->data[idx].p;

        // Check if the running sum is greater than p and we have kept at least min_keep tokens
        if (cum_sum > p && i >= min_keep - 1) {
            last_idx = i + 1;
            break;
        }
    }

    // Resize the output vector to keep only the locally typical tokens
    std::vector<llama_token_data> new_candidates;
    for (size_t i = 0; i < last_idx; ++i) {
        size_t idx = indices[i];
        new_candidates.push_back(candidates->data[idx]);
    }

    // Replace the data in candidates with the new_candidates data
    std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
    candidates->size = new_candidates.size();
    candidates->sorted = false;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
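
// Note (explanatory comment): locally typical sampling keeps the tokens whose
// surprisal -log p_i is closest to the distribution's entropy
//     H = -sum_i p_i * log p_i,
// i.e. the smallest |(-log p_i) - H|, accumulating their probability mass until it
// exceeds p. Unlike top-p, this can drop the single most probable token when its
// surprisal is far below H.
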
void llama_sample_entropy(struct llama_context * ctx, llama_token_data_array * candidates_p, float min_temp, float max_temp, float exponent_val) {
    const int64_t t_start_sample_us = ggml_time_us();

    // no need to do anything if there is only one (or zero) candidates
    if (candidates_p->size <= 1) {
        return;
    }

    // Calculate maximum possible entropy
    float max_entropy = -logf(1.0f / candidates_p->size);

    llama_sample_softmax(nullptr, candidates_p);

    // Calculate entropy of the softmax probabilities
    float entropy = 0.0f;
    for (size_t i = 0; i < candidates_p->size; ++i) {
        float prob = candidates_p->data[i].p;
        if (prob > 0.0f) { // Ensure no log(0)
            entropy -= prob * logf(prob);
        }
    }

    // Normalize the entropy (max_entropy cannot be 0 here because we checked candidates_p->size != 1 above)
    float normalized_entropy = entropy / max_entropy;

    // Map the normalized entropy to the desired temperature range using the power function
    float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val);

#ifdef DEBUG
    LLAMA_LOG_INFO("Your text maxtemp value is: %f\n", max_temp);
    LLAMA_LOG_INFO("Entropy: %f\n", entropy);
    LLAMA_LOG_INFO("Max Possible Entropy: %f\n", max_entropy);
    LLAMA_LOG_INFO("Normalized Entropy: %f\n", normalized_entropy);
    LLAMA_LOG_INFO("Exponent: %f\n", exponent_val);
    LLAMA_LOG_INFO("Dynamic Temperature (dyn_temp): %f\n", dyn_temp);
#endif

    // Apply the dynamically calculated temperature scaling
    for (size_t i = 0; i < candidates_p->size; ++i) {
        candidates_p->data[i].logit /= dyn_temp;
    }

    // Re-compute softmax probabilities after scaling logits with dynamic temperature
    double max_l_double   = candidates_p->data[0].logit;
    double cum_sum_double = 0.0;
    for (size_t i = 0; i < candidates_p->size; ++i) {
        double p = exp(candidates_p->data[i].logit - max_l_double);
        candidates_p->data[i].p = p; // Store the scaled probability
        cum_sum_double += p;
    }
    for (size_t i = 0; i < candidates_p->size; ++i) {
        candidates_p->data[i].p /= cum_sum_double; // Re-normalize the probabilities
    }

#ifdef DEBUG
    // Print the updated top 25 probabilities after temperature scaling
    LLAMA_LOG_INFO("\nUpdated Top 25 Probabilities After Dynamic Temperature Scaling (in percentages):\n");
    for (size_t i = 0; i < 25 && i < candidates_p->size; ++i) {
        LLAMA_LOG_INFO("Token %zu: %f%%\n", i + 1, candidates_p->data[i].p * 100.0f);
    }
#endif

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
    const int64_t t_start_sample_us = ggml_time_us();

    for (size_t i = 0; i < candidates_p->size; ++i) {
        candidates_p->data[i].logit /= temp;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
void llama_sample_repetition_penalties(
            struct llama_context * ctx,
          llama_token_data_array * candidates,
               const llama_token * last_tokens,
                          size_t   penalty_last_n,
                           float   penalty_repeat,
                           float   penalty_freq,
                           float   penalty_present) {
    if (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    // Create a frequency map to count occurrences of each token in last_tokens
    std::unordered_map<llama_token, int> token_count;
    for (size_t i = 0; i < penalty_last_n; ++i) {
        token_count[last_tokens[i]]++;
    }

    // Apply frequency and presence penalties to the candidates
    for (size_t i = 0; i < candidates->size; ++i) {
        const auto token_iter = token_count.find(candidates->data[i].id);
        if (token_iter == token_count.end()) {
            continue;
        }

        const int count = token_iter->second;

        // The academic publication that described this technique simply divided by the penalty,
        // but that would cause tokens with negative logits to become more likely, which is obviously wrong.
        // A common fix is to multiply by the penalty instead of dividing when the logit is negative.
        if (candidates->data[i].logit <= 0) {
            candidates->data[i].logit *= penalty_repeat;
        } else {
            candidates->data[i].logit /= penalty_repeat;
        }

        candidates->data[i].logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;
    }

    candidates->sorted = false;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
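
// Usage sketch (illustrative only, not part of the library): applying the combined
// penalties over the last 64 generated tokens before truncation sampling. `cur_p`
// is a llama_token_data_array built from the current logits, and `last` is a
// hypothetical buffer of previously sampled token ids.
//
// const size_t penalty_last_n = std::min<size_t>(64, last.size());
// llama_sample_repetition_penalties(ctx, &cur_p,
//         last.data() + last.size() - penalty_last_n,
//         penalty_last_n,
//         /*penalty_repeat*/ 1.1f, /*penalty_freq*/ 0.0f, /*penalty_present*/ 0.0f);
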
void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
    GGML_ASSERT(ctx);
    const int64_t t_start_sample_us = ggml_time_us();

    bool allow_eog = false;
    for (const auto & stack : grammar->stacks) {
        if (stack.empty()) {
            allow_eog = true;
            break;
        }
    }

    std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
    candidates_decoded.reserve(candidates->size);

    std::vector<llama_grammar_candidate> candidates_grammar;
    candidates_grammar.reserve(candidates->size);

    for (size_t i = 0; i < candidates->size; ++i) {
        const llama_token id    = candidates->data[i].id;
        const std::string piece = llama_token_to_piece(ctx, id, false);

        if (llama_token_is_eog(&ctx->model, id)) {
            if (!allow_eog) {
                candidates->data[i].logit = -INFINITY;
            }
        } else if (piece.empty() || piece[0] == 0) {
            candidates->data[i].logit = -INFINITY;
        } else {
            candidates_decoded.push_back(decode_utf8(piece, grammar->partial_utf8));
            candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
        }
    }

    const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
    for (const auto & reject : rejects) {
        candidates->data[reject.index].logit = -INFINITY;
    }

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}
static void llama_log_softmax(float * array, size_t size) {
    float max_l = *std::max_element(array, array + size);
    float sum = 0.f;
    for (size_t i = 0; i < size; ++i) {
        float p = expf(array[i] - max_l);
        sum += p;
        array[i] = p;
    }

    for (size_t i = 0; i < size; ++i) {
        array[i] = logf(array[i] / sum);
    }
}
void llama_sample_apply_guidance(
          struct llama_context * ctx,
                        float * logits,
                        float * logits_guidance,
                        float   scale) {
    GGML_ASSERT(ctx);

    const auto t_start_sample_us = ggml_time_us();
    const auto n_vocab = llama_n_vocab(llama_get_model(ctx));

    llama_log_softmax(logits, n_vocab);
    llama_log_softmax(logits_guidance, n_vocab);

    for (int i = 0; i < n_vocab; ++i) {
              auto & l = logits[i];
        const auto & g = logits_guidance[i];

        l = scale * (l - g) + g;
    }

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}
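
// Note (explanatory comment): this is classifier-free guidance in log space. With
// l the log-softmax of the conditional logits and g the log-softmax of the guidance
// (e.g. negative-prompt) logits, the update l' = g + scale * (l - g) leaves the
// resulting distribution unchanged at scale == 1, and for scale > 1 amplifies
// whatever the conditional distribution prefers relative to the guidance
// distribution.
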
llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int32_t m, float * mu) {
    GGML_ASSERT(ctx);

    auto N = float(llama_n_vocab(llama_get_model(ctx)));
    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();

    llama_sample_softmax(nullptr, candidates);

    // Estimate s_hat using the most probable m tokens
    float s_hat = 0.0;
    float sum_ti_bi = 0.0;
    float sum_ti_sq = 0.0;
    for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
        float t_i = logf(float(i + 2) / float(i + 1));
        float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
        sum_ti_bi += t_i * b_i;
        sum_ti_sq += t_i * t_i;
    }
    s_hat = sum_ti_bi / sum_ti_sq;

    // Compute k from the estimated s_hat and target surprise value
    float epsilon_hat = s_hat - 1;
    float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);

    // Sample the next word X using top-k sampling
    llama_sample_top_k(nullptr, candidates, int(k), 1);
    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    llama_token X = llama_sample_token(ctx, candidates);
    t_start_sample_us = ggml_time_us();

    // Compute error as the difference between observed surprise and target surprise value
    size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
        return candidate.id == X;
    }));
    float observed_surprise = -log2f(candidates->data[X_idx].p);
    float e = observed_surprise - tau;

    // Update mu using the learning rate and error
    *mu = *mu - eta * e;

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    return X;
}
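
// Note (explanatory comment): the formulas above follow the Mirostat paper
// (Basu et al., 2021). s_hat is a least-squares estimate of the Zipf exponent from
// the top m probability ratios (b_i regressed on t_i), and k is the paper's
// approximation for the top-k cutoff whose expected surprise under a Zipfian
// distribution over N words matches the current target mu. mu itself is then nudged
// by -eta * (observed_surprise - tau), a simple feedback loop that keeps per-token
// surprise near tau.
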
llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();

    llama_sample_softmax(ctx, candidates);

    // Truncate the words with surprise values greater than mu
    candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
        return -log2f(candidate.p) > *mu;
    }));

    if (candidates->size == 0) {
        candidates->size = 1;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }

    // Normalize the probabilities of the remaining words
    llama_sample_softmax(ctx, candidates);

    // Sample the next word X from the remaining words
    llama_token X = llama_sample_token(ctx, candidates);
    t_start_sample_us = ggml_time_us();

    // Compute error as the difference between observed surprise and target surprise value
    size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
        return candidate.id == X;
    }));
    float observed_surprise = -log2f(candidates->data[X_idx].p);
    float e = observed_surprise - tau;

    // Update mu using the learning rate and error
    *mu = *mu - eta * e;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
    return X;
}
llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
    const int64_t t_start_sample_us = ggml_time_us();

    // Find max element
    auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
        return a.logit < b.logit;
    });

    llama_token result = max_iter->id;
    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
        ctx->n_sample++;
    }
    return result;
}
llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng) {
    GGML_ASSERT(ctx);

    const int64_t t_start_sample_us = ggml_time_us();
    llama_sample_softmax(nullptr, candidates);

    std::vector<float> probs;
    probs.reserve(candidates->size);
    for (size_t i = 0; i < candidates->size; ++i) {
        probs.push_back(candidates->data[i].p);
    }

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    int idx = dist(rng);

    llama_token result = candidates->data[idx].id;

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    ctx->n_sample++;
    return result;
}

llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
    return llama_sample_token_with_rng(ctx, candidates, ctx->rng);
}
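
// Usage sketch (illustrative only, not part of the library): a typical sampling
// chain a caller might run after llama_decode(). `ctx` is a valid llama_context;
// the cutoffs and temperature are arbitrary example values.
//
// const float * logits  = llama_get_logits(ctx);
// const int     n_vocab = llama_n_vocab(llama_get_model(ctx));
//
// std::vector<llama_token_data> cur;
// cur.reserve(n_vocab);
// for (llama_token id = 0; id < n_vocab; ++id) {
//     cur.push_back({ id, logits[id], 0.0f });
// }
// llama_token_data_array cur_p = { cur.data(), cur.size(), /*sorted*/ false };
//
// llama_sample_top_k(ctx, &cur_p, 40, 1);            // keep the 40 best logits
// llama_sample_top_p(ctx, &cur_p, 0.95f, 1);         // then nucleus-truncate
// llama_sample_temp (ctx, &cur_p, 0.8f);             // sharpen the rest
// llama_token tok = llama_sample_token(ctx, &cur_p); // draw from the softmax
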
void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
    const int64_t t_start_sample_us = ggml_time_us();

    if (llama_token_is_eog(&ctx->model, token)) {
        for (const auto & stack : grammar->stacks) {
            if (stack.empty()) {
                return;
            }
        }
        GGML_ASSERT(false);
    }

    const std::string piece = llama_token_to_piece(ctx, token, false);

    // Note terminating 0 in decoded string
    const auto   decoded     = decode_utf8(piece, grammar->partial_utf8);
    const auto & code_points = decoded.first;
    std::vector<std::vector<const llama_grammar_element *>> tmp_new_stacks;
    for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
        llama_grammar_accept(grammar->rules, grammar->stacks, *it, tmp_new_stacks);
        grammar->stacks = tmp_new_stacks;
    }
    grammar->partial_utf8 = decoded.second;
    GGML_ASSERT(!grammar->stacks.empty());

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}
//
// Beam search
//

struct llama_beam {
    std::vector<llama_token> tokens;
    float p;  // Cumulative beam probability (renormalized relative to all beams)
    bool eob; // Initialize end-of-beam to false. Callback sets this to true.
    // Sort beams by probability. In case of ties, prefer beams at eob.
    bool operator<(const llama_beam & rhs) const {
        return std::make_pair(p, eob) < std::make_pair(rhs.p, rhs.eob);
    }
    // Shift off first n tokens and discard them.
    void shift_tokens(const size_t n) {
        if (n) {
            std::copy(tokens.begin() + n, tokens.end(), tokens.begin());
            tokens.resize(tokens.size() - n);
        }
    }
    llama_beam_view view() const { return {tokens.data(), tokens.size(), p, eob}; }
};
// A struct for calculating logit-related info.
struct llama_logit_info {
    const float * const logits;
    const int n_vocab;
    const float max_l;
    const float normalizer;
    struct sum_exp {
        float max_l;
        float operator()(float sum, float l) const { return sum + std::exp(l - max_l); }
    };
    llama_logit_info(llama_context * ctx)
      : logits(llama_get_logits(ctx))
      , n_vocab(llama_n_vocab(llama_get_model(ctx)))
      , max_l(*std::max_element(logits, logits + n_vocab))
      , normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
      { }
    llama_token_data get_token_data(const llama_token token_id) const {
        constexpr auto p = std::numeric_limits<float>::quiet_NaN(); // never used
        return {token_id, logits[token_id], p};
    }
    // Return top k token_data by logit.
    std::vector<llama_token_data> top_k(size_t k) {
        std::vector<llama_token_data> min_heap; // min-heap by logit
        const llama_token k_min = std::min(static_cast<llama_token>(k), n_vocab);
        min_heap.reserve(k_min);
        for (llama_token token_id = 0 ; token_id < k_min ; ++token_id) {
            min_heap.push_back(get_token_data(token_id));
        }
        auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; };
        std::make_heap(min_heap.begin(), min_heap.end(), comp);
        for (llama_token token_id = k_min ; token_id < n_vocab ; ++token_id) {
            if (min_heap.front().logit < logits[token_id]) {
                std::pop_heap(min_heap.begin(), min_heap.end(), comp);
                min_heap.back().id    = token_id;
                min_heap.back().logit = logits[token_id];
                std::push_heap(min_heap.begin(), min_heap.end(), comp);
            }
        }
        return min_heap;
    }
    float probability_from_logit(float logit) const {
        return normalizer * std::exp(logit - max_l);
    }
};
struct llama_beam_search_data {
    llama_context * ctx;
    size_t n_beams;
    int n_past;
    int n_predict;
    std::vector<llama_beam> beams;
    std::vector<llama_beam> next_beams;

    // Re-calculated on each loop iteration
    size_t common_prefix_length;

    // Used to communicate to/from callback on beams state.
    std::vector<llama_beam_view> beam_views;

    llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict)
        : ctx(ctx)
        , n_beams(n_beams)
        , n_past(n_past)
        , n_predict(n_predict)
        , beam_views(n_beams) {
        beams.reserve(n_beams);
        next_beams.reserve(n_beams);
    }

    // Collapse beams to a single beam given by index.
    void collapse_beams(const size_t beam_idx) {
        if (0u < beam_idx) {
            std::swap(beams[0], beams[beam_idx]);
        }
        beams.resize(1);
    }

    // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
    // The repetitive patterns below reflect the 2 stages of heaps:
    //  * Gather elements until the vector is full, then call std::make_heap() on it.
    //  * If the heap is full and a new element is found that should be included, pop the
    //    least element to the back(), replace it with the new, then push it into the heap.
    void fill_next_beams_by_top_probabilities(llama_beam & beam) {
        // Min-heaps use a greater-than comparator.
        const auto comp = [](const llama_beam & a, const llama_beam & b) { return a.p > b.p; };
        if (beam.eob) {
            // beam is at end-of-sentence, so just copy it to next_beams if its probability is high enough.
            if (next_beams.size() < n_beams) {
                next_beams.push_back(std::move(beam));
                if (next_beams.size() == n_beams) {
                    std::make_heap(next_beams.begin(), next_beams.end(), comp);
                }
            } else if (next_beams.front().p < beam.p) {
                std::pop_heap(next_beams.begin(), next_beams.end(), comp);
                next_beams.back() = std::move(beam);
                std::push_heap(next_beams.begin(), next_beams.end(), comp);
            }
        } else {
            // beam is not at end-of-sentence, so branch with next top_k tokens.
            if (!beam.tokens.empty()) {
                llama_decode(ctx, llama_batch_get_one(beam.tokens.data(), beam.tokens.size(), n_past, 0));
            }
            llama_logit_info logit_info(ctx);
            std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);

            // Clear the kv slot so that other beams may try different tokens at this position. The llama_decode()
            // call in loop() will conclusively fill in the kv slot once the beams converge at this position.
            llama_kv_cache_seq_rm(ctx, 0, n_past, -1);

            size_t i = 0;
            if (next_beams.size() < n_beams) {
                for (; next_beams.size() < n_beams ; ++i) {
                    llama_beam next_beam = beam;
                    next_beam.tokens.push_back(next_tokens[i].id);
                    next_beam.p *= logit_info.probability_from_logit(next_tokens[i].logit);
                    next_beams.push_back(std::move(next_beam));
                }
                std::make_heap(next_beams.begin(), next_beams.end(), comp);
            } else {
                for (; next_beams.front().p == 0.0f ; ++i) {
                    std::pop_heap(next_beams.begin(), next_beams.end(), comp);
                    next_beams.back() = beam;
                    next_beams.back().tokens.push_back(next_tokens[i].id);
                    next_beams.back().p *= logit_info.probability_from_logit(next_tokens[i].logit);
                    std::push_heap(next_beams.begin(), next_beams.end(), comp);
                }
            }

            for (; i < n_beams ; ++i) {
                const float next_p = beam.p * logit_info.probability_from_logit(next_tokens[i].logit);
                if (next_beams.front().p < next_p) {
                    std::pop_heap(next_beams.begin(), next_beams.end(), comp);
                    next_beams.back() = beam;
                    next_beams.back().tokens.push_back(next_tokens[i].id);
                    next_beams.back().p = next_p;
                    std::push_heap(next_beams.begin(), next_beams.end(), comp);
                }
            }
        }
    }

    // Find common_prefix_length based on beams.
    // Requires beams is not empty.
    size_t find_common_prefix_length() {
        size_t common_prefix_length = beams[0].tokens.size();
        for (size_t i = 1 ; i < beams.size() ; ++i) {
            common_prefix_length = std::min(common_prefix_length, beams[i].tokens.size());
            for (size_t j = 0 ; j < common_prefix_length ; ++j) {
                if (beams[0].tokens[j] != beams[i].tokens[j]) {
                    common_prefix_length = j;
                    break;
                }
            }
        }
        return common_prefix_length;
    }

    // Construct beams_state to send back to caller via the callback function.
    // Side effect: set common_prefix_length = find_common_prefix_length();
    llama_beams_state get_beams_state(const bool last_call) {
        for (size_t i = 0 ; i < beams.size() ; ++i) {
            beam_views[i] = beams[i].view();
        }
        common_prefix_length = find_common_prefix_length();
        return {beam_views.data(), beams.size(), common_prefix_length, last_call};
    }

    // Loop:
    //  * while i < n_predict, AND
    //  * any of the beams have not yet reached end-of-beam (eob), AND
    //  * the highest probability beam(s) (plural in case of ties) are not at end-of-sentence
    //    (since all other beam probabilities can only decrease)
    void loop(const llama_beam_search_callback_fn_t callback, void * const callback_data) {
        beams.push_back({{}, 1.0f, false}); // Start with one empty beam w/ probability = 1.0 and !eob.
        const auto not_eob = [](const llama_beam & beam) { return !beam.eob; };
        for (int i = 0 ; i < n_predict && std::any_of(beams.begin(),beams.end(),not_eob) &&
                       !beams[top_beam_index()].eob ; ++i) {
            callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
            update_beams_from_beam_views();                  // Update values (p,eob) that callback may have changed.
            if (common_prefix_length) {
                llama_decode(ctx, llama_batch_get_one(beams[0].tokens.data(), common_prefix_length, n_past, 0));
                n_past += common_prefix_length;
            }
            // Zero-out next_beam probabilities to place them last in following min-heap.
            std::for_each(next_beams.begin(), next_beams.end(), [](llama_beam & beam) { beam.p = 0.0f; });
            for (llama_beam & beam : beams) {
                beam.shift_tokens(common_prefix_length);
                fill_next_beams_by_top_probabilities(beam);
            }
            // next_beams become the beams of next/final iteration. Swap them to re-use memory.
            beams.swap(next_beams);
            renormalize_beam_probabilities(beams);
        }
        collapse_beams(top_beam_index());
        callback(callback_data, get_beams_state(true));
    }

    // As beams grow, the cumulative probabilities decrease.
    // Renormalize them to avoid floating point underflow.
    static void renormalize_beam_probabilities(std::vector<llama_beam> & beams) {
        const auto sum_p = [](float sum, llama_beam & beam) { return sum + beam.p; };
        const float inv_sum = 1.0f / std::accumulate(beams.begin(), beams.end(), 0.0f, sum_p);
        std::for_each(beams.begin(), beams.end(), [=](llama_beam & beam) { beam.p *= inv_sum; });
    }

    // Assumes beams is non-empty. Uses llama_beam::operator<() for ordering.
    size_t top_beam_index() {
        return std::max_element(beams.begin(), beams.end()) - beams.begin();
    }

    // Copy (p,eob) for each beam which may have been changed by the callback.
    void update_beams_from_beam_views() {
        for (size_t i = 0 ; i < beams.size() ; ++i) {
            beams[i].p   = beam_views[i].p;
            beams[i].eob = beam_views[i].eob;
        }
    }
};
void llama_beam_search(llama_context * ctx,
                       llama_beam_search_callback_fn_t callback, void * callback_data,
                       size_t n_beams, int n_past, int n_predict) {
    assert(ctx);
    const int64_t t_start_sample_us = ggml_time_us();

    llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict);

    beam_search_data.loop(callback, callback_data);

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    ctx->n_sample++;
}
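
// Usage sketch (illustrative only, not part of the library): driving the beam
// search with a callback. The callback inspects each beam view and may set eob;
// on the invocation where beams_state.last_call is true, beam_views[0] holds the
// winning beam. `eos_token` and `n_tokens_evaluated` are assumed to come from the
// caller's setup.
//
// static void my_beam_callback(void * data, llama_beams_state bs) { // hypothetical
//     for (size_t i = 0; i < bs.n_beams; ++i) {
//         llama_beam_view & bv = bs.beam_views[i];
//         // e.g. mark a beam finished once it ends in an end-of-generation token
//         if (bv.n_tokens > 0 && bv.tokens[bv.n_tokens - 1] == eos_token) {
//             bv.eob = true;
//         }
//     }
// }
// llama_beam_search(ctx, my_beam_callback, /*callback_data*/ nullptr,
//                   /*n_beams*/ 4, /*n_past*/ n_tokens_evaluated, /*n_predict*/ 64);
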
//
// quantization
//

struct quantize_state_internal {
    const llama_model                 & model;
    const llama_model_quantize_params * params;

    int n_attention_wv = 0;
    int n_ffn_down     = 0;
    int n_ffn_gate     = 0;
    int n_ffn_up       = 0;
    int i_attention_wv = 0;
    int i_ffn_down     = 0;
    int i_ffn_gate     = 0;
    int i_ffn_up       = 0;

    int n_k_quantized = 0;
    int n_fallback    = 0;

    bool has_imatrix = false;

    // used to figure out if a model shares tok_embd with the output weight
    bool has_output = false;

    quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
        : model(model)
        , params(params)
        {}
};
static void llama_tensor_dequantize_internal(
    struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
    const size_t nelements, const int nthread
) {
    if (output.size() < nelements) {
        output.resize(nelements);
    }
    float * f32_output = (float *) output.data();

    ggml_type_traits_t qtype;
    if (ggml_is_quantized(tensor->type)) {
        qtype = ggml_internal_get_type_traits(tensor->type);
        if (qtype.to_float == NULL) {
            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
        }
    } else if (tensor->type != GGML_TYPE_F16 &&
               tensor->type != GGML_TYPE_BF16) {
        throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
    }

    if (nthread < 2) {
        if (tensor->type == GGML_TYPE_F16) {
            ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
        } else if (tensor->type == GGML_TYPE_BF16) {
            ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements);
        } else if (ggml_is_quantized(tensor->type)) {
            qtype.to_float(tensor->data, f32_output, nelements);
        } else {
            GGML_ASSERT(false); // unreachable
        }
        return;
    }

    size_t block_size;
    if (tensor->type == GGML_TYPE_F16 ||
        tensor->type == GGML_TYPE_BF16) {
        block_size = 1;
    } else {
        block_size = (size_t)ggml_blck_size(tensor->type);
    }

    size_t block_size_bytes = ggml_type_size(tensor->type);

    GGML_ASSERT(nelements % block_size == 0);
    size_t nblocks = nelements / block_size;
    size_t blocks_per_thread = nblocks / nthread;
    size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count

    size_t in_buff_offs = 0;
    size_t out_buff_offs = 0;

    for (int tnum = 0; tnum < nthread; tnum++) {
        size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
        size_t thr_elems = thr_blocks * block_size;             // number of elements for this thread
        size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread

        auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
            if (typ == GGML_TYPE_F16) {
                ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
            } else if (typ == GGML_TYPE_BF16) {
                ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels);
            } else {
                qtype.to_float(inbuf, outbuf, nels);
            }
        };
        workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
        in_buff_offs += thr_block_bytes;
        out_buff_offs += thr_elems;
    }
    for (auto & w : workers) { w.join(); }
    workers.clear();
}
static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
    const std::string name = ggml_get_name(tensor);

    // TODO: avoid hardcoded tensor names - use the TN_* constants
    const llm_arch arch = qs.model.arch;
    const auto       tn = LLM_TN(arch);

    auto use_more_bits = [](int i_layer, int num_layers) -> bool {
        return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
    };
    const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
    auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
        if (n_expert > 1) {
            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
            // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
            // for getting the current layer as I initially thought, and we need to resort to parsing the
            // tensor name.
            if (sscanf(name, "blk.%d.", &i_layer) != 1) {
                throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
            }
            if (i_layer < 0 || i_layer >= n_layer) {
                throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
            }
        }
        return std::make_pair(i_layer, n_layer);
    };
    // for arches that share the same tensor between the token embeddings and the output, we quantize the token embeddings
    // with the quantization of the output tensor
    if (name == tn(LLM_TENSOR_OUTPUT, "weight") || (!qs.has_output && name == tn(LLM_TENSOR_TOKEN_EMBD, "weight"))) {
        if (qs.params->output_tensor_type < GGML_TYPE_COUNT) {
            new_type = qs.params->output_tensor_type;
        } else {
            int nx = tensor->ne[0];
            if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
                new_type = GGML_TYPE_Q8_0;
            }
            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S  || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M   ||
                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
                new_type = GGML_TYPE_Q5_K;
            }
            else if (new_type != GGML_TYPE_Q8_0) {
                new_type = GGML_TYPE_Q6_K;
            }
        }
    } else if (name == "token_embd.weight") {
        if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
            new_type = qs.params->token_embedding_type;
        } else {
            if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
                ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
                new_type = GGML_TYPE_Q2_K;
            }
            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
                new_type = GGML_TYPE_IQ3_S;
            }
            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
                new_type = GGML_TYPE_IQ3_S;
            }
        }
    } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
               ftype == LLAMA_FTYPE_MOSTLY_IQ2_S  || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
        if (name.find("attn_v.weight") != std::string::npos) {
            if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
            else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
            ++qs.i_attention_wv;
        }
        else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
            new_type = GGML_TYPE_Q4_K;
        }
        else if (name.find("ffn_down") != std::string::npos) {
            if (qs.i_ffn_down < qs.n_ffn_down/8) {
                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
            }
            ++qs.i_ffn_down;
        }
        else if (name.find("attn_output.weight") != std::string::npos) {
            if (qs.model.hparams.n_expert == 8) {
                new_type = GGML_TYPE_Q5_K;
            } else {
                if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
            }
        }
    } else if (name.find("attn_v.weight") != std::string::npos) {
        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
            new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
        }
        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 4) {
            new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
            new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
            new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
            new_type = GGML_TYPE_Q5_K;
        }
        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
        else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
                (qs.i_attention_wv < qs.n_attention_wv/8 || qs.i_attention_wv >= 7*qs.n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
        if (qs.model.type == MODEL_70B) {
            // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
            // nearly negligible increase in model size by quantizing this tensor with more bits:
            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
        }
        if (qs.model.hparams.n_expert == 8) {
            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
            // TODO: explore better strategies
            new_type = GGML_TYPE_Q8_0;
        }
        ++qs.i_attention_wv;
    } else if (name.find("attn_k.weight") != std::string::npos) {
        if (qs.model.hparams.n_expert == 8) {
            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
            // TODO: explore better strategies
            new_type = GGML_TYPE_Q8_0;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
            new_type = GGML_TYPE_IQ3_XXS;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
            new_type = GGML_TYPE_IQ2_S;
        }
    } else if (name.find("attn_q.weight") != std::string::npos) {
        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
            new_type = GGML_TYPE_IQ3_XXS;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
            new_type = GGML_TYPE_IQ2_S;
        }
    } else if (name.find("ffn_down") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
        int i_layer = info.first, n_layer = info.second;
        if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
            if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
            new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
            new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
                     : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
                     : GGML_TYPE_Q3_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
                    (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
            new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
            new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
            if (arch == LLM_ARCH_FALCON) {
                new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
                           use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
            } else {
                if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
            }
        }
        else if (i_layer < n_layer/8 && (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && !qs.has_imatrix) {
            new_type = GGML_TYPE_Q5_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
            new_type = GGML_TYPE_Q5_K;
        }
        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
                && qs.has_imatrix && i_layer < n_layer/8) {
            // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
            // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
            // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
            new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
        }
        ++qs.i_ffn_down;
    } else if (name.find("attn_output.weight") != std::string::npos) {
        if (arch != LLM_ARCH_FALCON) {
            if (qs.model.hparams.n_expert == 8) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                    ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL  ||
                    ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S   ||
                    ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) {
                    new_type = GGML_TYPE_Q5_K;
                }
            } else {
                if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   ) new_type = GGML_TYPE_Q3_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  ) new_type = GGML_TYPE_Q4_K;
            }
        } else {
            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
        }
    }
    else if (name.find("attn_qkv.weight") != std::string::npos) {
        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
            new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
    }
    else if (name.find("ffn_gate") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
        int i_layer = info.first, n_layer = info.second;
        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
            new_type = GGML_TYPE_IQ3_XXS;
        }
        ++qs.i_ffn_gate;
    }
    else if (name.find("ffn_up") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
        int i_layer = info.first, n_layer = info.second;
        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
            new_type = GGML_TYPE_IQ3_XXS;
        }
        ++qs.i_ffn_up;
    }
    //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
    //}
    // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
    //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
    //}
    // This can be used to reduce the size of the Q5_K_S model.
    // The associated PPL increase is fully in line with the size reduction
    //else {
    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
    //}
    bool convert_incompatible_tensor = false;
    if (new_type == GGML_TYPE_Q2_K    || new_type == GGML_TYPE_Q3_K    || new_type == GGML_TYPE_Q4_K   ||
        new_type == GGML_TYPE_Q5_K    || new_type == GGML_TYPE_Q6_K    || new_type == GGML_TYPE_IQ4_XS ||
        new_type == GGML_TYPE_IQ2_XS  || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S  ||
        new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S   || new_type == GGML_TYPE_IQ3_S  ||
        new_type == GGML_TYPE_IQ1_M) {
        int nx = tensor->ne[0];
        int ny = tensor->ne[1];
        if (nx % QK_K != 0) {
            LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
            convert_incompatible_tensor = true;
        } else {
            ++qs.n_k_quantized;
        }
    }
    if (convert_incompatible_tensor) {
        switch (new_type) {
            case GGML_TYPE_IQ2_XXS:
            case GGML_TYPE_IQ2_XS:
            case GGML_TYPE_IQ2_S:
            case GGML_TYPE_IQ3_XXS:
            case GGML_TYPE_IQ3_S:
            case GGML_TYPE_IQ1_S:
            case GGML_TYPE_IQ1_M:
            case GGML_TYPE_Q2_K:
            case GGML_TYPE_Q3_K:
            case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
            case GGML_TYPE_Q4_K:   new_type = GGML_TYPE_Q5_0;   break;
            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q5_1;   break;
            case GGML_TYPE_Q6_K:   new_type = GGML_TYPE_Q8_0;   break;
            default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
        }
        LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
        ++qs.n_fallback;
    }
    return new_type;
}
static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
    if (nthread < 2) {
        // single-thread
        size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
        if (!ggml_validate_row_data(new_type, new_data, new_size)) {
            throw std::runtime_error("quantized data validation failed");
        }
        return new_size;
    }

    std::mutex mutex;
    int64_t counter = 0;
    size_t new_size = 0;
    bool valid = true;
    auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size,
            nrows, n_per_row, imatrix]() {
        const int64_t nrows_per_chunk = chunk_size / n_per_row;
        size_t local_size = 0;
        while (true) {
            std::unique_lock<std::mutex> lock(mutex);
            int64_t first_row = counter; counter += nrows_per_chunk;
            if (first_row >= nrows) {
                if (local_size > 0) {
                    new_size += local_size;
                }
                break;
            }
            lock.unlock();
            const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
            size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
            local_size += this_size;

            // validate the quantized data
            const size_t row_size = ggml_row_size(new_type, n_per_row);
            void * this_data = (char *) new_data + first_row * row_size;
            if (!ggml_validate_row_data(new_type, this_data, this_size)) {
                std::unique_lock<std::mutex> lock(mutex);
                valid = false;
                break;
            }
        }
    };
    for (int it = 0; it < nthread - 1; ++it) {
        workers.emplace_back(compute);
    }
    compute();
    for (auto & w : workers) { w.join(); }
    workers.clear();
    if (!valid) {
        throw std::runtime_error("quantized data validation failed");
    }
    return new_size;
}
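
// Worked example (explanatory comment, hypothetical numbers): if the caller passes
// chunk_size = 16384 and n_per_row = 4096, each worker claims
// nrows_per_chunk = 16384 / 4096 = 4 rows at a time under the mutex, so work is
// handed out dynamically rather than pre-partitioned. Note that only nthread - 1
// std::thread workers are spawned; the calling thread runs compute() itself, which
// is why the spawn loop above goes to nthread - 1.
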
static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
    ggml_type default_type;
    llama_ftype ftype = params->ftype;

    switch (params->ftype) {
        case LLAMA_FTYPE_MOSTLY_Q4_0: default_type = GGML_TYPE_Q4_0; break;
        case LLAMA_FTYPE_MOSTLY_Q4_1: default_type = GGML_TYPE_Q4_1; break;
        case LLAMA_FTYPE_MOSTLY_Q5_0: default_type = GGML_TYPE_Q5_0; break;
        case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break;
        case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
        case LLAMA_FTYPE_MOSTLY_F16:  default_type = GGML_TYPE_F16;  break;
        case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
        case LLAMA_FTYPE_ALL_F32:     default_type = GGML_TYPE_F32;  break;

        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K_S:
        case LLAMA_FTYPE_MOSTLY_Q2_K:    default_type = GGML_TYPE_Q2_K;    break;
        case LLAMA_FTYPE_MOSTLY_IQ3_XS:  default_type = GGML_TYPE_IQ3_S;   break;
        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
        case LLAMA_FTYPE_MOSTLY_Q3_K_L:  default_type = GGML_TYPE_Q3_K;    break;
        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
        case LLAMA_FTYPE_MOSTLY_Q4_K_M:  default_type = GGML_TYPE_Q4_K;    break;
        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  default_type = GGML_TYPE_Q5_K;    break;
        case LLAMA_FTYPE_MOSTLY_Q6_K:    default_type = GGML_TYPE_Q6_K;    break;
        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: default_type = GGML_TYPE_IQ2_XXS; break;
        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  default_type = GGML_TYPE_IQ2_XS;  break;
        case LLAMA_FTYPE_MOSTLY_IQ2_S:   default_type = GGML_TYPE_IQ2_XS;  break;
        case LLAMA_FTYPE_MOSTLY_IQ2_M:   default_type = GGML_TYPE_IQ2_S;   break;
        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: default_type = GGML_TYPE_IQ3_XXS; break;
        case LLAMA_FTYPE_MOSTLY_IQ1_S:   default_type = GGML_TYPE_IQ1_S;   break;
        case LLAMA_FTYPE_MOSTLY_IQ1_M:   default_type = GGML_TYPE_IQ1_M;   break;
        case LLAMA_FTYPE_MOSTLY_IQ4_NL:  default_type = GGML_TYPE_IQ4_NL;  break;
        case LLAMA_FTYPE_MOSTLY_IQ4_XS:  default_type = GGML_TYPE_IQ4_XS;  break;
        case LLAMA_FTYPE_MOSTLY_IQ3_S:   default_type = GGML_TYPE_IQ3_S;   break;
        case LLAMA_FTYPE_MOSTLY_IQ3_M:   default_type = GGML_TYPE_IQ3_S;   break;

        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
    }
  12107. int nthread = params->nthread;
  12108. if (nthread <= 0) {
  12109. nthread = std::thread::hardware_concurrency();
  12110. }
    // mmap consistently increases speed on Linux, and also increases speed on Windows with
    // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
#if defined(__linux__) || defined(_WIN32)
    constexpr bool use_mmap = true;
#else
    constexpr bool use_mmap = false;
#endif

    llama_model_kv_override * kv_overrides = nullptr;
    if (params->kv_overrides) {
        auto v = (std::vector<llama_model_kv_override>*)params->kv_overrides;
        kv_overrides = v->data();
    }
    llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
    ml.init_mappings(false); // no prefetching

    llama_model model;
    llm_load_arch(ml, model);
    llm_load_hparams(ml, model);

    struct quantize_state_internal qs(model, params);

    if (params->only_copy) {
        ftype = model.ftype;
    }
    const std::unordered_map<std::string, std::vector<float>> * imatrix_data = nullptr;
    if (params->imatrix) {
        imatrix_data = static_cast<const std::unordered_map<std::string, std::vector<float>>*>(params->imatrix);
        if (imatrix_data) {
            LLAMA_LOG_INFO("================================ Have weights data with %d entries\n", int(imatrix_data->size()));
            qs.has_imatrix = true;
        }
    }

    const size_t align = GGUF_DEFAULT_ALIGNMENT;
    struct gguf_context * ctx_out = gguf_init_empty();

    // copy the KV pairs from the input file
    gguf_set_kv     (ctx_out, ml.meta);
    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
    gguf_set_val_u32(ctx_out, "general.file_type", ftype);
    // Remove split metadata
    gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
    gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
    gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str());

    if (params->kv_overrides) {
        const std::vector<llama_model_kv_override> & overrides = *(const std::vector<llama_model_kv_override> *)params->kv_overrides;
        for (auto & o : overrides) {
            if (o.key[0] == 0) break;
            if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
                gguf_set_val_f32(ctx_out, o.key, o.val_f64);
            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
                gguf_set_val_i32(ctx_out, o.key, o.val_i64);
            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
                gguf_set_val_bool(ctx_out, o.key, o.val_bool);
            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
                gguf_set_val_str(ctx_out, o.key, o.val_str);
            } else {
                LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
            }
        }
    }
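    // Illustrative caller-side sketch (not part of this file's build) for the
    // override block above: the public llama_model_quantize_params.kv_overrides
    // field carries an opaque pointer to a std::vector<llama_model_kv_override>,
    // terminated by an entry whose key begins with '\0'. Key/value below are
    // placeholders.
#if 0
    std::vector<llama_model_kv_override> overrides(2);

    overrides[0].tag = LLAMA_KV_OVERRIDE_TYPE_STR;
    std::strncpy(overrides[0].key, "general.name", sizeof(overrides[0].key) - 1);
    std::strncpy(overrides[0].val_str, "my-model", sizeof(overrides[0].val_str) - 1);

    overrides[1].key[0] = '\0'; // sentinel consumed by the loop above

    llama_model_quantize_params qparams = llama_model_quantize_default_params();
    qparams.kv_overrides = &overrides;
#endif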
    for (int i = 0; i < ml.n_tensors; ++i) {
        const struct ggml_tensor * meta = ml.get_tensor_meta(i);

        const std::string name = ggml_get_name(meta);

        // TODO: avoid hardcoded tensor names - use the TN_* constants
        if (name.find("attn_v.weight")   != std::string::npos ||
            name.find("attn_qkv.weight") != std::string::npos) {
            ++qs.n_attention_wv;
        } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
            qs.has_output = true;
        }
    }

    qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;

    // sanity checks
    //
    //  - qs.n_attention_wv == 0                     for Mamba       models
    //  - qs.n_attention_wv == model.hparams.n_layer for Transformer models
    //
    GGML_ASSERT((qs.n_attention_wv == 0 || qs.n_attention_wv == (int)model.hparams.n_layer) && "n_attention_wv is unexpected");

    size_t total_size_org = 0;
    size_t total_size_new = 0;

    std::vector<std::thread> workers;
    workers.reserve(nthread);

    int idx = 0;

    std::vector<no_init<uint8_t>> read_data;
    std::vector<no_init<uint8_t>> work;
    std::vector<no_init<float>> f32_conv_buf;

    uint16_t n_split = 1;
    // Assume split index is continuous
    if (params->keep_split) {
        for (int i = 0; i < ml.n_tensors; ++i) {
            n_split = std::max(uint16_t(ml.get_weight(i)->idx + 1), n_split);
        }
    }
    std::vector<gguf_context*> ctx_outs(n_split, NULL);
    ctx_outs[0] = ctx_out;

    // populate the original tensors so we get an initial meta data
    for (int i = 0; i < ml.n_tensors; ++i) {
        auto weight = ml.get_weight(i);
        uint16_t i_split = params->keep_split ? weight->idx : 0;
        struct ggml_tensor * tensor = weight->tensor;
        if (ctx_outs[i_split] == NULL) {
            ctx_outs[i_split] = gguf_init_empty();
        }
        gguf_add_tensor(ctx_outs[i_split], tensor);
    }

    // Set split info if needed
    if (n_split > 1) {
        for (size_t i = 0; i < ctx_outs.size(); ++i) {
            gguf_set_val_u16(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
            gguf_set_val_u16(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
            gguf_set_val_i32(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
        }
    }
    int cur_split = -1;
    std::ofstream fout;
    auto close_ofstream = [&]() {
        // Write metadata and close file handler
        if (fout.is_open()) {
            fout.seekp(0);
            std::vector<uint8_t> data(gguf_get_meta_size(ctx_outs[cur_split]));
            gguf_get_meta_data(ctx_outs[cur_split], data.data());
            fout.write((const char *) data.data(), data.size());
            fout.close();
        }
    };
    auto new_ofstream = [&](int index) {
        cur_split = index;
        GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context");
        std::string fname = fname_out;
        if (params->keep_split) {
            char split_path[PATH_MAX] = {0};
            llama_split_path(split_path, sizeof(split_path), fname_out.c_str(), cur_split, n_split);
            fname = std::string(split_path);
        }

        fout = std::ofstream(fname, std::ios::binary);
        fout.exceptions(std::ofstream::failbit); // fail fast on write errors
        const size_t meta_size = gguf_get_meta_size(ctx_outs[cur_split]);
        // placeholder for the meta data
        ::zeros(fout, meta_size);
    };

    const auto tn = LLM_TN(model.arch);
    new_ofstream(0);
    for (int i = 0; i < ml.n_tensors; ++i) {
        auto weight = ml.get_weight(i);
        struct ggml_tensor * tensor = weight->tensor;
        if (weight->idx != cur_split && params->keep_split) {
            close_ofstream();
            new_ofstream(weight->idx);
        }

        const std::string name = ggml_get_name(tensor);

        if (!ml.use_mmap) {
            if (read_data.size() < ggml_nbytes(tensor)) {
                read_data.resize(ggml_nbytes(tensor));
            }
            tensor->data = read_data.data();
        }
        ml.load_data_for(tensor);

        LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
               ++idx, ml.n_tensors,
               ggml_get_name(tensor),
               llama_format_tensor_shape(tensor).c_str(),
               ggml_type_name(tensor->type));

        // This used to be a regex, but <regex> has an extreme cost to compile times.
        bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?

        // quantize only 2D and 3D tensors (experts)
        quantize &= (ggml_n_dims(tensor) >= 2);

        // do not quantize norm tensors
        quantize &= name.find("_norm.weight") == std::string::npos;

        quantize &= params->quantize_output_tensor || name != "output.weight";
        quantize &= !params->only_copy;

        // do not quantize expert gating tensors
        // NOTE: can't use LLM_TN here because the layer number is not known
        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;

        // do not quantize positional embeddings and token types (BERT)
        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD,    "weight");
        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");

        // do not quantize Mamba's small yet 2D weights
        // NOTE: can't use LLM_TN here because the layer number is not known
        quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
        quantize &= name.find("ssm_x.weight")      == std::string::npos;
        quantize &= name.find("ssm_dt.weight")     == std::string::npos;

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;

        if (quantize) {
            new_type = default_type;

            // get more optimal quantization type based on the tensor shape, layer, etc.
            if (!params->pure && ggml_is_quantized(default_type)) {
                new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
            }
            if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
                new_type = params->token_embedding_type;
            }
            if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
                new_type = params->output_tensor_type;
            }

            // If we've decided to quantize to the same type the tensor is already
            // in then there's nothing to do.
            quantize = tensor->type != new_type;
        }

        if (!quantize) {
            new_type = tensor->type;
            new_data = tensor->data;
            new_size = ggml_nbytes(tensor);
            LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
        } else {
            const int64_t nelements = ggml_nelements(tensor);

            const float * imatrix = nullptr;
            if (imatrix_data) {
                auto it = imatrix_data->find(tensor->name);
                if (it == imatrix_data->end()) {
                    LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
                } else {
                    if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
                        imatrix = it->second.data();
                    } else {
                        LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
                                int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);

                        // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
                        // this is a significant error and it may be good idea to abort the process if this happens,
                        // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
                        // tok_embd should be ignored in this case, since it always causes this warning
                        if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
                            throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
                                    int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
                        }
                    }
                }
            }
            if ((new_type == GGML_TYPE_IQ2_XXS ||
                 new_type == GGML_TYPE_IQ2_XS  ||
                 new_type == GGML_TYPE_IQ2_S   ||
                 new_type == GGML_TYPE_IQ1_S   ||
                (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight")) ||
                (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
                LLAMA_LOG_ERROR("\n\n============================================================\n");
                LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
                LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
                LLAMA_LOG_ERROR("============================================================\n\n");
                throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
            }

            float * f32_data;

            if (tensor->type == GGML_TYPE_F32) {
                f32_data = (float *) tensor->data;
            } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
            } else {
                llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
                f32_data = (float *) f32_conv_buf.data();
            }

            LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
            fflush(stdout);

            if (work.size() < (size_t)nelements * 4) {
                work.resize(nelements * 4); // upper bound on size
            }
            new_data = work.data();

            const int64_t n_per_row = tensor->ne[0];
            const int64_t nrows     = tensor->ne[1];

            static const int64_t min_chunk_size = 32 * 512;
            const int64_t chunk_size = n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row);

            const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
            const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
            const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1;

            // quantize each expert separately since they have different importance matrices
            new_size = 0;
            for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) {
                const float * f32_data_03 = f32_data + i03 * nelements_matrix;
                void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
                const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;

                new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
            }
            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
        }
        total_size_org += ggml_nbytes(tensor);
        total_size_new += new_size;

        // update the gguf meta data as we go
        gguf_set_tensor_type(ctx_outs[cur_split], name.c_str(), new_type);
        gguf_set_tensor_data(ctx_outs[cur_split], name.c_str(), new_data, new_size);

        // write tensor data + padding
        fout.write((const char *) new_data, new_size);
        zeros(fout, GGML_PAD(new_size, align) - new_size);
    }
    close_ofstream();
    for (auto & c : ctx_outs) {
        gguf_free(c);
    }

    LLAMA_LOG_INFO("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
    LLAMA_LOG_INFO("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);

    if (qs.n_fallback > 0) {
        LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",
                __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
    }
}
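// Illustrative sketch (not part of this file's build): inspecting the split
// bookkeeping written above from one shard. The key strings are assumed to
// match the LLM_KV_SPLIT_* table earlier in this file ("split.no",
// "split.count", "split.tensors.count"); the file name is a placeholder.
#if 0
struct gguf_init_params gparams = {
    /*.no_alloc =*/ true,
    /*.ctx      =*/ nullptr,
};
struct gguf_context * gctx = gguf_init_from_file("model-00001-of-00002.gguf", gparams);
if (gctx) {
    const int kid = gguf_find_key(gctx, "split.count");
    if (kid >= 0) {
        printf("shards: %u\n", (unsigned) gguf_get_val_u16(gctx, kid));
    }
    gguf_free(gctx);
}
#endif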
static int llama_apply_lora_from_file_internal(
    const struct llama_model & model, const char * path_lora, float scale, const char * path_base_model, int n_threads
) {
    LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);

    const int64_t t_start_lora_us = ggml_time_us();

    llama_file fin(path_lora, "rb");

    // verify magic and version
    {
        uint32_t magic = fin.read_u32();
        if (magic != LLAMA_FILE_MAGIC_GGLA) {
            LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
            return 1;
        }

        uint32_t format_version = fin.read_u32();
        if (format_version != 1) {
            LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
            return 1;
        }
    }

    int32_t lora_r = fin.read_u32();
    int32_t lora_alpha = fin.read_u32();
    float scaling = scale * (float)lora_alpha / (float)lora_r;

    LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);

    // load base model
    std::unique_ptr<llama_model_loader> ml;
    if (path_base_model) {
        LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
        ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*check_tensors*/ false, /*kv_overrides*/ nullptr));
        ml->init_mappings(/*prefetch*/ false); // no prefetching
    }

    struct tensor_meta {
        std::string name;
        ggml_type type;
        int32_t ne[2];
        size_t offset;
    };
    std::map<std::string, tensor_meta> tensor_meta_map;

    // load all tensor meta
    while (true) {
        if (fin.tell() == fin.size) {
            // eof
            break;
        }

        int32_t n_dims;
        int32_t name_len;
        int32_t ftype;

        fin.read_raw(&n_dims,   sizeof(n_dims));
        fin.read_raw(&name_len, sizeof(name_len));
        fin.read_raw(&ftype,    sizeof(ftype));

        if (n_dims != 1 && n_dims != 2) {
            LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
            return 1;
        }

        int32_t ne[2] = { 1, 1 };
        for (int i = 0; i < n_dims; ++i) {
            fin.read_raw(&ne[i], sizeof(ne[i]));
        }

        std::string name;
        {
            GGML_ASSERT(name_len < GGML_MAX_NAME);
            char buf[GGML_MAX_NAME];
            fin.read_raw(buf, name_len);
            name = std::string(buf, name_len);
        }

        // check for lora suffix
        std::string lora_suffix;
        if (name.length() > 6) {
            lora_suffix = name.substr(name.length() - 6);
        }
        if (lora_suffix != ".loraA" && lora_suffix != ".loraB") {
            LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
            return 1;
        }

        // tensor type
        ggml_type wtype;
        switch (ftype) {
            case 0: wtype = GGML_TYPE_F32; break;
            case 1: wtype = GGML_TYPE_F16; break;
            default:
                {
                    LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
                            __func__, ftype);
                    return 1;
                }
        }

        // data offset
        size_t offset = fin.tell();
        offset = (offset + 31) & -32;

        // skip tensor data
        fin.seek(offset + ggml_row_size(wtype, ne[0]) * ne[1], SEEK_SET);

        tensor_meta_map.emplace(name, tensor_meta{ name, wtype, { ne[0], ne[1] }, offset });
    }
    bool warned = false;
    int n_tensors = 0;

    // apply
    ggml_backend_t backend_cpu = ggml_backend_cpu_init();
    if (backend_cpu == nullptr) {
        LLAMA_LOG_ERROR("%s: error: failed to initialize cpu backend\n", __func__);
        return 1;
    }
    ggml_backend_cpu_set_n_threads(backend_cpu, n_threads);

    std::vector<no_init<uint8_t>> read_buf;
    for (const auto & it : model.tensors_by_name) {
        const std::string & base_name = it.first;
        ggml_tensor * model_t = it.second;

        if (tensor_meta_map.find(base_name + ".loraA") == tensor_meta_map.end() ||
            tensor_meta_map.find(base_name + ".loraB") == tensor_meta_map.end()) {
            continue;
        }

        tensor_meta & metaA = tensor_meta_map.at(base_name + ".loraA");
        tensor_meta & metaB = tensor_meta_map.at(base_name + ".loraB");

        ggml_init_params lora_init_params = {
            /* .mem_size   */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
            /* .mem_buffer */ nullptr,
            /* .no_alloc   */ true,
        };
        ggml_context * lora_ctx = ggml_init(lora_init_params);
        if (lora_ctx == nullptr) {
            LLAMA_LOG_ERROR("%s: error: failed to initialize lora context\n", __func__);
            ggml_backend_free(backend_cpu);
            return 1;
        }

        // create tensors
        ggml_tensor * loraA = ggml_new_tensor_2d(lora_ctx, metaA.type, metaA.ne[0], metaA.ne[1]);
        ggml_tensor * loraB = ggml_new_tensor_2d(lora_ctx, metaB.type, metaB.ne[0], metaB.ne[1]);
        ggml_set_name(loraA, metaA.name.c_str());
        ggml_set_name(loraB, metaB.name.c_str());
        ggml_tensor * base_t;
        if (ml) {
            if (!ml->get_tensor_meta(base_name.c_str())) {
                LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
                ggml_free(lora_ctx);
                ggml_backend_free(backend_cpu);
                return 1;
            }
            base_t = ggml_dup_tensor(lora_ctx, ml->get_tensor_meta(base_name.c_str()));
        } else {
            base_t = ggml_dup_tensor(lora_ctx, model_t);
        }
        ggml_set_name(base_t, base_name.c_str());

        // allocate in backend buffer
        ggml_backend_buffer_t lora_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type());
        if (lora_buf == nullptr) {
            LLAMA_LOG_ERROR("%s: error: failed to allocate lora tensors\n", __func__);
            ggml_free(lora_ctx);
            ggml_backend_free(backend_cpu);
            return 1;
        }
        // load tensor data
        auto load_tensor = [&read_buf, &fin](const tensor_meta & tensor_meta, ggml_tensor * tensor) {
            read_buf.resize(ggml_nbytes(tensor));
            fin.seek(tensor_meta.offset, SEEK_SET);
            fin.read_raw(read_buf.data(), ggml_nbytes(tensor));
            ggml_backend_tensor_set(tensor, read_buf.data(), 0, read_buf.size());
        };
        load_tensor(metaA, loraA);
        load_tensor(metaB, loraB);

        // load base model tensor data
        if (ml) {
            ml->load_data_for(base_t);
        } else {
            ggml_backend_tensor_copy(model_t, base_t);
        }

        if (ggml_is_quantized(base_t->type) && !warned) {
            LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
                           "use a f16 or f32 base model with --lora-base\n", __func__);
            warned = true;
        }

        if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
            LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
                            " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
            ggml_free(lora_ctx);
            ggml_backend_buffer_free(lora_buf);
            ggml_backend_free(backend_cpu);
            return 1;
        }

        auto build_lora_graph = [&]() {
            // w = w + BA*s
            ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
            ggml_set_name(BA, "BA");

            if (scaling != 1.0f) {
                BA = ggml_scale(lora_ctx, BA, scaling);
                ggml_set_name(BA, "BA_scaled");
            }

            ggml_tensor * r;
            r = ggml_add_inplace(lora_ctx, base_t, BA);
            ggml_set_name(r, "r_add");

            if (base_t->type != model_t->type) {
                // convert the result to the model type
                r = ggml_cast(lora_ctx, r, model_t->type);
                ggml_set_name(r, "r_cast");
            }

            return r;
        };

        ggml_cgraph * gf = ggml_new_graph(lora_ctx);
        ggml_tensor * r = build_lora_graph();
        ggml_build_forward_expand(gf, r);

        ggml_backend_buffer_t graph_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type());
        if (graph_buf == nullptr) {
            LLAMA_LOG_ERROR("%s: error: failed to allocate graph tensors\n", __func__);
            ggml_free(lora_ctx);
            ggml_backend_buffer_free(lora_buf);
            ggml_backend_free(backend_cpu);
            return 1;
        }

        ggml_backend_graph_compute(backend_cpu, gf);

        ggml_backend_tensor_set(model_t, r->data, 0, ggml_nbytes(r));

#if 0
        // TODO: use scheduler with fallback to CPU for less copies between CPU and GPU
        //ggml_backend_sched_t sched = ggml_backend_sched_new(backends.data(), backends.size(), GGML_DEFAULT_GRAPH_SIZE);

        // sched compute
        ggml_build_forward_expand(gf, build_graph());
        ggml_backend_sched_init_measure(sched, gf);

        // create the graph again, since the previous one was destroyed by the measure
        ggml_graph_clear(gf);
        ggml_build_forward_expand(gf, build_graph());
        ggml_backend_sched_graph_compute(sched, gf);
        ggml_backend_sched_free(sched);
#endif

        ggml_backend_buffer_free(lora_buf);
        ggml_backend_buffer_free(graph_buf);
        ggml_free(lora_ctx);

        n_tensors++;
        if (n_tensors % 4 == 0) {
            LLAMA_LOG_INFO(".");
        }
    }

    ggml_backend_free(backend_cpu);

    const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
    LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);

    return 0;
}
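// Illustrative sketch (not part of this file's build): invoking the routine
// above through its public wrapper, supplying an f16 base model so quantized
// weights are not used as the merge base (see the warning above). Paths and
// the thread count are placeholders.
#if 0
llama_model_params mparams = llama_model_default_params();
llama_model * model = llama_load_model_from_file("model-q4_k.gguf", mparams);
if (model != nullptr) {
    const int32_t err = llama_model_apply_lora_from_file(model, "adapter.ggla", 1.0f, "model-f16.gguf", 8);
    if (err != 0) {
        // adapter not applied; the model may have been partially modified
    }
    llama_free_model(model);
}
#endif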
//
// interface implementation
//

struct llama_model_params llama_model_default_params() {
    struct llama_model_params result = {
        /*.n_gpu_layers                =*/ 0,
        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
        /*.main_gpu                    =*/ 0,
        /*.tensor_split                =*/ nullptr,
        /*.rpc_servers                 =*/ nullptr,
        /*.progress_callback           =*/ nullptr,
        /*.progress_callback_user_data =*/ nullptr,
        /*.kv_overrides                =*/ nullptr,
        /*.vocab_only                  =*/ false,
        /*.use_mmap                    =*/ true,
        /*.use_mlock                   =*/ false,
        /*.check_tensors               =*/ false,
    };

#ifdef GGML_USE_METAL
    // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
    result.n_gpu_layers = 999;
#endif

    return result;
}
struct llama_context_params llama_context_default_params() {
    struct llama_context_params result = {
        /*.seed                        =*/ LLAMA_DEFAULT_SEED,
        /*.n_ctx                       =*/ 512,
        /*.n_batch                     =*/ 2048,
        /*.n_ubatch                    =*/ 512,
        /*.n_seq_max                   =*/ 1,
        /*.n_threads                   =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
        /*.n_threads_batch             =*/ GGML_DEFAULT_N_THREADS,
        /*.rope_scaling_type           =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
        /*.pooling_type                =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
        /*.rope_freq_base              =*/ 0.0f,
        /*.rope_freq_scale             =*/ 0.0f,
        /*.yarn_ext_factor             =*/ -1.0f,
        /*.yarn_attn_factor            =*/ 1.0f,
        /*.yarn_beta_fast              =*/ 32.0f,
        /*.yarn_beta_slow              =*/ 1.0f,
        /*.yarn_orig_ctx               =*/ 0,
        /*.defrag_thold                =*/ -1.0f,
        /*.cb_eval                     =*/ nullptr,
        /*.cb_eval_user_data           =*/ nullptr,
        /*.type_k                      =*/ GGML_TYPE_F16,
        /*.type_v                      =*/ GGML_TYPE_F16,
        /*.logits_all                  =*/ false,
        /*.embeddings                  =*/ false,
        /*.offload_kqv                 =*/ true,
        /*.flash_attn                  =*/ false,
        /*.abort_callback              =*/ nullptr,
        /*.abort_callback_data         =*/ nullptr,
    };

    return result;
}
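// Illustrative sketch (not part of this file's build): start from the defaults
// above and override only what a given use case needs, which stays correct as
// fields are appended to the struct. Values are placeholders.
#if 0
llama_context_params cparams = llama_context_default_params();
cparams.n_ctx      = 4096; // 0 would fall back to the model's training context
cparams.n_threads  = 8;
cparams.embeddings = true; // also extract embeddings
#endif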
struct llama_model_quantize_params llama_model_quantize_default_params() {
    struct llama_model_quantize_params result = {
        /*.nthread                     =*/ 0,
        /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
        /*.output_tensor_type          =*/ GGML_TYPE_COUNT,
        /*.token_embedding_type        =*/ GGML_TYPE_COUNT,
        /*.allow_requantize            =*/ false,
        /*.quantize_output_tensor      =*/ true,
        /*.only_copy                   =*/ false,
        /*.pure                        =*/ false,
        /*.keep_split                  =*/ false,
        /*.imatrix                     =*/ nullptr,
        /*.kv_overrides                =*/ nullptr,
    };

    return result;
}
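// Illustrative sketch (not part of this file's build): a minimal quantization
// call using the defaults above with only the target type overridden. File
// names are placeholders.
#if 0
llama_model_quantize_params qparams = llama_model_quantize_default_params();
qparams.ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;
// nthread == 0 selects std::thread::hardware_concurrency()

if (llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams) != 0) {
    fprintf(stderr, "quantization failed\n");
}
#endif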
size_t llama_max_devices(void) {
#if defined(GGML_USE_RPC)
    return GGML_RPC_MAX_SERVERS;
#elif defined(GGML_USE_METAL)
    return 1;
#elif defined(GGML_USE_CUDA)
    return GGML_CUDA_MAX_DEVICES;
#elif defined(GGML_USE_SYCL)
    return GGML_SYCL_MAX_DEVICES;
#elif defined(GGML_USE_VULKAN)
    return GGML_VK_MAX_DEVICES;
#else
    return 1;
#endif
}

bool llama_supports_mmap(void) {
    return llama_mmap::SUPPORTED;
}

bool llama_supports_mlock(void) {
    return llama_mlock::SUPPORTED;
}

bool llama_supports_gpu_offload(void) {
#if defined(GGML_USE_CUDA) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
    defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC)
    // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
    return true;
#else
    return false;
#endif
}
void llama_backend_init(void) {
    ggml_time_init();

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }
}

void llama_numa_init(enum ggml_numa_strategy numa) {
    if (numa != GGML_NUMA_STRATEGY_DISABLED) {
        ggml_numa_init(numa);
    }
}

void llama_backend_free(void) {
    ggml_quantize_free();
}

int64_t llama_time_us(void) {
    return ggml_time_us();
}
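// Illustrative sketch (not part of this file's build): the expected bracketing
// of the process-wide calls above around all other API use.
#if 0
int main() {
    llama_backend_init();
    llama_numa_init(GGML_NUMA_STRATEGY_DISABLED); // or a real NUMA strategy

    // ... load models, create contexts, run inference, free them ...

    llama_backend_free(); // after all models and contexts are freed
    return 0;
}
#endif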
struct llama_model * llama_load_model_from_file(
        const char * path_model,
        struct llama_model_params params) {
    ggml_time_init();
    llama_model * model = new llama_model;

    unsigned cur_percentage = 0;
    if (params.progress_callback == NULL) {
        params.progress_callback_user_data = &cur_percentage;
        params.progress_callback = [](float progress, void * ctx) {
            unsigned * cur_percentage_p = (unsigned *) ctx;
            unsigned percentage = (unsigned) (100 * progress);
            while (percentage > *cur_percentage_p) {
                *cur_percentage_p = percentage;
                LLAMA_LOG_INFO(".");
                if (percentage >= 100) {
                    LLAMA_LOG_INFO("\n");
                }
            }
            return true;
        };
    }
    if (params.rpc_servers != nullptr) {
        // split the comma-separated list of servers into model->rpc_servers
        std::string servers(params.rpc_servers);
        size_t pos = 0;
        while ((pos = servers.find(",")) != std::string::npos) {
            std::string server = servers.substr(0, pos);
            model->rpc_servers.push_back(server);
            servers.erase(0, pos + 1);
        }
        model->rpc_servers.push_back(servers);
    }
    int status = llama_model_load(path_model, *model, params);
    GGML_ASSERT(status <= 0);
    if (status < 0) {
        if (status == -1) {
            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
        } else if (status == -2) {
            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
        }
        delete model;
        return nullptr;
    }

    return model;
}

void llama_free_model(struct llama_model * model) {
    delete model;
}
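// Illustrative sketch (not part of this file's build): loading a model with
// the function above and pairing it with a context; the nullptr checks mirror
// the error returns used throughout this file. The path is a placeholder.
#if 0
llama_model_params mparams = llama_model_default_params();
mparams.use_mmap = true;

llama_model * model = llama_load_model_from_file("model.gguf", mparams);
if (model != nullptr) {
    llama_context * lctx = llama_new_context_with_model(model, llama_context_default_params());
    if (lctx != nullptr) {
        // ... use the context ...
        llama_free(lctx);
    }
    llama_free_model(model);
}
#endif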
struct llama_context * llama_new_context_with_model(
        struct llama_model * model,
        struct llama_context_params params) {
    if (!model) {
        LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__);
        return nullptr;
    }

    if (params.n_batch == 0 && params.n_ubatch == 0) {
        LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__);
        return nullptr;
    }

    if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) {
        LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__);
        return nullptr;
    }

    if (params.flash_attn && model->arch == LLM_ARCH_GROK) {
        LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
        params.flash_attn = false;
    }

    llama_context * ctx = new llama_context(*model);

    const auto & hparams = model->hparams;
    auto       & cparams = ctx->cparams;

    cparams.n_seq_max        = std::max(1u, params.n_seq_max);
    cparams.n_threads        = params.n_threads;
    cparams.n_threads_batch  = params.n_threads_batch;
    cparams.yarn_ext_factor  = params.yarn_ext_factor;
    cparams.yarn_attn_factor = params.yarn_attn_factor;
    cparams.yarn_beta_fast   = params.yarn_beta_fast;
    cparams.yarn_beta_slow   = params.yarn_beta_slow;
    cparams.defrag_thold     = params.defrag_thold;
    cparams.embeddings       = params.embeddings;
    cparams.offload_kqv      = params.offload_kqv;
    cparams.flash_attn       = params.flash_attn;
    cparams.pooling_type     = params.pooling_type;

    cparams.n_ctx            = params.n_ctx           == 0    ? hparams.n_ctx_train           : params.n_ctx;
    cparams.rope_freq_base   = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
    cparams.rope_freq_scale  = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;

    // this is necessary due to kv_self.n being padded later during inference
    cparams.n_ctx            = GGML_PAD(cparams.n_ctx, llama_kv_cache_get_padding(cparams));

    // with causal attention, the batch size is limited by the context size
    cparams.n_batch          = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;

    // the batch has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask
    // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext)
    // ref: https://github.com/ggerganov/llama.cpp/pull/5021
    if (cparams.n_batch < GGML_KQ_MASK_PAD) {
        LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD);
        cparams.n_batch = GGML_KQ_MASK_PAD;
    }

    cparams.n_ubatch         = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);

    cparams.n_yarn_orig_ctx  = params.yarn_orig_ctx    != 0 ? params.yarn_orig_ctx    :
                               hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
                                                              hparams.n_ctx_train;

    cparams.cb_eval           = params.cb_eval;
    cparams.cb_eval_user_data = params.cb_eval_user_data;

    auto rope_scaling_type = params.rope_scaling_type;
    if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) {
        rope_scaling_type = hparams.rope_scaling_type_train;
    }

    if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) {
        cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
    }

    if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
        cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f;
    }

    cparams.causal_attn = hparams.causal_attn;

    if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
        if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
            cparams.pooling_type = LLAMA_POOLING_TYPE_NONE;
        } else {
            cparams.pooling_type = hparams.pooling_type;
        }
    }

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    LLAMA_LOG_INFO("%s: n_ctx      = %u\n",   __func__, cparams.n_ctx);
    LLAMA_LOG_INFO("%s: n_batch    = %u\n",   __func__, cparams.n_batch);
    LLAMA_LOG_INFO("%s: n_ubatch   = %u\n",   __func__, cparams.n_ubatch);
    LLAMA_LOG_INFO("%s: flash_attn = %d\n",   __func__, cparams.flash_attn);
    LLAMA_LOG_INFO("%s: freq_base  = %.1f\n", __func__, cparams.rope_freq_base);
    LLAMA_LOG_INFO("%s: freq_scale = %g\n",   __func__, cparams.rope_freq_scale);

    ctx->abort_callback      = params.abort_callback;
    ctx->abort_callback_data = params.abort_callback_data;

    ctx->rng        = std::mt19937(params.seed);
    ctx->logits_all = params.logits_all;

    uint32_t kv_size = cparams.n_ctx;
    ggml_type type_k = params.type_k;
    ggml_type type_v = params.type_v;

    // Mamba only needs a constant number of KV cache cells per sequence
    if (model->arch == LLM_ARCH_MAMBA) {
        // Mamba needs at least as many KV cells as there are sequences kept at any time
        kv_size = std::max((uint32_t) 1, params.n_seq_max);
        // it's probably best to keep as much precision as possible for the states
        type_k = GGML_TYPE_F32; // required by ggml_ssm_conv for Mamba's conv_states
        type_v = GGML_TYPE_F32; // required by ggml_ssm_scan for Mamba's ssm_states
    }

    GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0);
    GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0);
    if (!hparams.vocab_only) {
        // initialize backends
#if defined(GGML_USE_RPC)
        for (auto & server : model->rpc_servers) {
            ggml_backend_t backend = ggml_backend_rpc_init(server.c_str());
            if (backend == nullptr) {
                LLAMA_LOG_ERROR("%s: failed to connect RPC backend to %s\n", __func__, server.c_str());
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(backend);
        }
#elif defined(GGML_USE_METAL)
        if (model->n_gpu_layers > 0) {
            ctx->backend_metal = ggml_backend_metal_init();
            if (ctx->backend_metal == nullptr) {
                LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(ctx->backend_metal);
        }
#elif defined(GGML_USE_CUDA)
        if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
            // with split_mode LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_ROW, only the main GPU backend is used
            ggml_backend_t backend = ggml_backend_cuda_init(model->main_gpu);
            if (backend == nullptr) {
                LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, model->main_gpu);
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(backend);
        } else {
            // LLAMA_SPLIT_MODE_LAYER requires a backend for each GPU
            for (int device = 0; device < ggml_backend_cuda_get_device_count(); ++device) {
                ggml_backend_t backend = ggml_backend_cuda_init(device);
                if (backend == nullptr) {
                    LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, device);
                    llama_free(ctx);
                    return nullptr;
                }
                ctx->backends.push_back(backend);
            }
        }
#elif defined(GGML_USE_VULKAN)
        if (model->split_mode == LLAMA_SPLIT_MODE_ROW) {
            LLAMA_LOG_ERROR("%s: Row split not supported. Failed to initialize Vulkan backend\n", __func__);
            llama_free(ctx);
            return nullptr;
        }
        if (model->split_mode == LLAMA_SPLIT_MODE_NONE) {
            ggml_backend_t backend = ggml_backend_vk_init(0);
            if (backend == nullptr) {
                LLAMA_LOG_ERROR("%s: failed to initialize Vulkan backend\n", __func__);
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(backend);
        } else {
            for (int device = 0; device < ggml_backend_vk_get_device_count(); ++device) {
                ggml_backend_t backend = ggml_backend_vk_init(device);
                if (backend == nullptr) {
                    LLAMA_LOG_ERROR("%s: failed to initialize Vulkan%d backend\n", __func__, device);
                    llama_free(ctx);
                    return nullptr;
                }
                ctx->backends.push_back(backend);
            }
        }
#elif defined(GGML_USE_SYCL)
        // with split_mode LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_ROW, only the main GPU backend is used
        if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
            ggml_backend_t backend = ggml_backend_sycl_init(model->main_gpu);
            if (backend == nullptr) {
                int main_gpu_id = ggml_backend_sycl_get_device_id(model->main_gpu);
                LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d (index %d) backend\n", __func__, main_gpu_id, model->main_gpu);
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(backend);
        } else {
            // LLAMA_SPLIT_LAYER requires a backend for each GPU
            for (int i = 0; i < ggml_backend_sycl_get_device_count(); ++i) {
                ggml_backend_t backend = ggml_backend_sycl_init(i);
                if (backend == nullptr) {
                    int id_list[GGML_SYCL_MAX_DEVICES];
                    ggml_sycl_get_gpu_list(id_list, GGML_SYCL_MAX_DEVICES);
                    LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d (index %d) backend\n", __func__, id_list[i], i);
                    llama_free(ctx);
                    return nullptr;
                }
                ctx->backends.push_back(backend);
            }
        }
#elif defined(GGML_USE_KOMPUTE)
        if (model->n_gpu_layers > 0) {
            auto * backend = ggml_backend_kompute_init(model->main_gpu);
            if (backend == nullptr) {
                LLAMA_LOG_ERROR("%s: failed to initialize Kompute backend\n", __func__);
                llama_free(ctx);
                return nullptr;
            }
            ctx->backends.push_back(backend);
        }
#endif
        ctx->backend_cpu = ggml_backend_cpu_init();
        if (ctx->backend_cpu == nullptr) {
            LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__);
            llama_free(ctx);
            return nullptr;
        }
        ctx->backends.push_back(ctx->backend_cpu);

        if (!llama_kv_cache_init(ctx->kv_self, ctx, type_k, type_v, kv_size, cparams.offload_kqv)) {
            LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
            llama_free(ctx);
            return nullptr;
        }

        {
            size_t memory_size_k = 0;
            size_t memory_size_v = 0;

            for (auto & k : ctx->kv_self.k_l) {
                memory_size_k += ggml_nbytes(k);
            }

            for (auto & v : ctx->kv_self.v_l) {
                memory_size_v += ggml_nbytes(v);
            }

            LLAMA_LOG_INFO("%s: KV self size  = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
                (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
                ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
                ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
        }

        // graph outputs buffer
        {
            // resized during inference when a batch uses more outputs
            if (llama_output_reserve(*ctx, params.n_seq_max) < params.n_seq_max) {
                LLAMA_LOG_ERROR("%s: failed to reserve initial output buffer\n", __func__);
                llama_free(ctx);
                return nullptr;
            }

            LLAMA_LOG_INFO("%s: %10s  output buffer size = %8.2f MiB\n", __func__,
                    ggml_backend_buffer_name(ctx->buf_output),
                    ggml_backend_buffer_get_size(ctx->buf_output) / 1024.0 / 1024.0);
        }

        // scheduler and compute buffers
        {
            // buffer types used for the compute buffer of each backend
            std::vector<ggml_backend_buffer_type_t> backend_buft;
            for (auto * backend : ctx->backends) {
                if (ggml_backend_is_cpu(backend)) {
                    // use host buffers for the CPU backend compute buffer
                    backend_buft.push_back(llama_default_buffer_type_cpu(true));
                } else {
                    backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
                }
            }

            // buffer used to store the computation graph and the tensor meta data
            ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead_custom(LLAMA_MAX_NODES, false));

            // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
            bool pipeline_parallel =
                llama_get_device_count(*model) > 1 &&
                model->n_gpu_layers > (int)model->hparams.n_layer &&
                model->split_mode == LLAMA_SPLIT_MODE_LAYER &&
                params.offload_kqv;
#ifndef GGML_USE_CUDA
            // pipeline parallelism requires support for async compute and events
            // currently this is only implemented in the CUDA backend
            pipeline_parallel = false;
#endif
            ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), LLAMA_MAX_NODES, pipeline_parallel);

            if (pipeline_parallel) {
                LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(ctx->sched));
            }

            // build worst-case graph
            int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_ubatch);
            int n_past = cparams.n_ctx - n_tokens;
            llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
            ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);

            // initialize scheduler with the worst-case graph
            if (!ggml_backend_sched_reserve(ctx->sched, gf)) {
                LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
                llama_free(ctx);
                return nullptr;
            }

            for (size_t i = 0; i < ctx->backends.size(); i++) {
                ggml_backend_t backend = ctx->backends[i];
                ggml_backend_buffer_type_t buft = backend_buft[i];
                size_t size = ggml_backend_sched_get_buffer_size(ctx->sched, backend);
                if (size > 1) {
                    LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                            ggml_backend_buft_name(buft),
                            size / 1024.0 / 1024.0);
                }
            }

            // note: the number of splits during measure is higher than during inference due to the kv shift
            int n_splits = ggml_backend_sched_get_n_splits(ctx->sched);
            LLAMA_LOG_INFO("%s: graph nodes  = %d\n", __func__, gf->n_nodes);
            LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits);
        }
    }

    return ctx;
}
void llama_free(struct llama_context * ctx) {
    delete ctx;
}

const llama_model * llama_get_model(const struct llama_context * ctx) {
    return &ctx->model;
}

uint32_t llama_n_ctx(const struct llama_context * ctx) {
    return ctx->cparams.n_ctx;
}

uint32_t llama_n_batch(const struct llama_context * ctx) {
    return ctx->cparams.n_batch;
}

uint32_t llama_n_ubatch(const struct llama_context * ctx) {
    return ctx->cparams.n_ubatch;
}

uint32_t llama_n_seq_max(const struct llama_context * ctx) {
    return ctx->kv_self.size;
}

enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
    return model->vocab.type;
}

enum llama_rope_type llama_rope_type(const struct llama_model * model) {
    switch (model->arch) {
        // these models do not use RoPE
        case LLM_ARCH_GPT2:
        case LLM_ARCH_GPTJ:
        case LLM_ARCH_GPTNEOX:
        case LLM_ARCH_MPT:
        case LLM_ARCH_REFACT:
        case LLM_ARCH_BLOOM:
        case LLM_ARCH_MAMBA:
        case LLM_ARCH_JINA_BERT_V2:
            return LLAMA_ROPE_TYPE_NONE;

        // use what we call a normal RoPE, operating on pairs of consecutive head values
        case LLM_ARCH_LLAMA:
        case LLM_ARCH_BAICHUAN:
        case LLM_ARCH_STARCODER:
        case LLM_ARCH_PLAMO:
        case LLM_ARCH_CODESHELL:
        case LLM_ARCH_ORION:
        case LLM_ARCH_INTERNLM2:
        case LLM_ARCH_MINICPM:
        case LLM_ARCH_XVERSE:
        case LLM_ARCH_COMMAND_R:
        case LLM_ARCH_OLMO:
            return LLAMA_ROPE_TYPE_NORM;

        // the pairs of head values are offset by n_rot/2
        case LLM_ARCH_FALCON:
        case LLM_ARCH_GROK:
        case LLM_ARCH_DBRX:
        case LLM_ARCH_BERT:
        case LLM_ARCH_NOMIC_BERT:
        case LLM_ARCH_STABLELM:
        case LLM_ARCH_QWEN:
        case LLM_ARCH_QWEN2:
        case LLM_ARCH_QWEN2MOE:
        case LLM_ARCH_PHI2:
        case LLM_ARCH_PHI3:
        case LLM_ARCH_GEMMA:
        case LLM_ARCH_STARCODER2:
            return LLAMA_ROPE_TYPE_NEOX;

        // all model arches should be listed explicitly here
        case LLM_ARCH_UNKNOWN:
            GGML_ASSERT(false && "unknown architecture");
            break;
    }

    return LLAMA_ROPE_TYPE_NONE;
}

enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) {
    return ctx->cparams.pooling_type;
}

int32_t llama_n_vocab(const struct llama_model * model) {
    return model->hparams.n_vocab;
}

int32_t llama_n_ctx_train(const struct llama_model * model) {
    return model->hparams.n_ctx_train;
}

int32_t llama_n_embd(const struct llama_model * model) {
    return model->hparams.n_embd;
}

int32_t llama_n_layer(const struct llama_model * model) {
    return model->hparams.n_layer;
}

float llama_rope_freq_scale_train(const struct llama_model * model) {
    return model->hparams.rope_freq_scale_train;
}
int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
    const auto & it = model->gguf_kv.find(key);
    if (it == model->gguf_kv.end()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}

int32_t llama_model_meta_count(const struct llama_model * model) {
    return (int)model->gguf_kv.size();
}

int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->first.c_str());
}

int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}
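// Illustrative sketch (not part of this file's build): enumerating GGUF
// metadata with the accessors above, given a loaded `model`. The fixed-size
// buffers rely on the snprintf-style truncation these functions provide.
#if 0
char key[256];
char val[256];
const int32_t n_meta = llama_model_meta_count(model);
for (int32_t i = 0; i < n_meta; ++i) {
    if (llama_model_meta_key_by_index(model, i, key, sizeof(key)) >= 0 &&
        llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) >= 0) {
        printf("%s = %s\n", key, val);
    }
}
#endif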
int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
    return snprintf(buf, buf_size, "%s %s %s",
            llama_model_arch_name(model->arch),
            llama_model_type_name(model->type),
            llama_model_ftype_name(model->ftype).c_str());
}

uint64_t llama_model_size(const struct llama_model * model) {
    uint64_t size = 0;
    for (const auto & it : model->tensors_by_name) {
        size += ggml_nbytes(it.second);
    }
    return size;
}

uint64_t llama_model_n_params(const struct llama_model * model) {
    uint64_t nparams = 0;
    for (const auto & it : model->tensors_by_name) {
        nparams += ggml_nelements(it.second);
    }
    return nparams;
}

struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
    auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(),
            [name](const std::pair<std::string, struct ggml_tensor *> & it) {
                return it.first == name;
            });
    if (it == model->tensors_by_name.end()) {
        return nullptr;
    }
    return it->second;
}

uint32_t llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        const llama_model_quantize_params * params) {
    try {
        llama_model_quantize_internal(fname_inp, fname_out, params);
        return 0;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
        return 1;
    }
}

int32_t llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, float scale, const char * path_base_model, int32_t n_threads) {
    try {
        return llama_apply_lora_from_file_internal(*model, path_lora, scale, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}
static bool llama_control_vector_init(struct llama_control_vector & cvec, const llama_model & model) {
    GGML_ASSERT(cvec.tensors.empty());
    GGML_ASSERT(cvec.ctxs.empty());
    GGML_ASSERT(cvec.bufs.empty());

    // count layer buffer types
    std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
    for (int64_t i = 0; i < model.hparams.n_layer; i++) {
        buft_layer_count[model.buft_layer[i].buft]++;
    }

    // allocate contexts
    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
    for (auto & it : buft_layer_count) {
        int n_layers = it.second;
        struct ggml_init_params params = {
            /*.mem_size   =*/ n_layers * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ggml_context * ctx = ggml_init(params);
        if (!ctx) {
            LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
            return false;
        }
        ctx_map[it.first] = ctx;
    }
    // make tensors
    cvec.tensors.reserve(model.hparams.n_layer);
    cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0
    for (size_t il = 1; il < model.hparams.n_layer; il++) {
        struct ggml_context * ctx = ctx_map.at(model.buft_layer[il].buft);
        ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
        cvec.tensors.push_back(tensor);
    }

    // allocate tensors / buffers and zero
    cvec.ctxs.reserve(ctx_map.size());
    cvec.bufs.reserve(ctx_map.size());
    for (auto it : ctx_map) {
        ggml_backend_buffer_type_t buft = it.first;
        ggml_context * ctx = it.second;
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
        if (!buf) {
            LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
            return false;
        }
        ggml_backend_buffer_clear(buf, 0);
        cvec.ctxs.push_back(ctx);
        cvec.bufs.push_back(buf);
    }

    return true;
}

int32_t llama_control_vector_apply(struct llama_context * lctx, const float * data, size_t len, int32_t n_embd, int32_t il_start, int32_t il_end) {
    const llama_model & model = lctx->model;
    llama_control_vector & cvec = lctx->cvec;

    if (data == nullptr) {
        // disable the current control vector (but leave allocated for later)
        cvec.layer_start = -1;
        cvec.layer_end   = -1;
        return 0;
    }

    if (n_embd != (int) model.hparams.n_embd) {
        LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
        return 1;
    }

    if (cvec.tensors.empty()) {
        if (!llama_control_vector_init(cvec, model)) {
            return 1;
        }
    }

    cvec.layer_start = il_start;
    cvec.layer_end   = il_end;

    for (size_t il = 1; il < model.hparams.n_layer; il++) {
        assert(cvec.tensors[il] != nullptr);

        const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
        if (off + n_embd <= len) {
            ggml_backend_tensor_set(cvec.tensors[il], data + off, 0, n_embd * ggml_element_size(cvec.tensors[il]));
        }
    }

    return 0;
}
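
// Usage sketch (illustrative only, not part of the library): applying a
// control vector from a flat float buffer holding one n_embd-sized vector per
// layer starting at layer 1, matching the offset math above. `ctx` and the
// layer count are assumptions:
//
//     const llama_model * mdl     = llama_get_model(ctx);
//     const int32_t       n_embd  = llama_n_embd(mdl);
//     const int32_t       n_layer = 32; // assumed layer count of the loaded model
//     std::vector<float> data((n_layer - 1) * n_embd, 0.0f);
//     if (llama_control_vector_apply(ctx, data.data(), data.size(), n_embd, 1, n_layer - 1) != 0) {
//         // n_embd mismatch or allocation failure
//     }
//     llama_control_vector_apply(ctx, nullptr, 0, 0, 0, 0); // disable, buffers stay allocated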

struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max) {
    struct llama_kv_cache_view result = {
        /*.n_cells            = */ 0,
        /*.n_seq_max          = */ n_seq_max,
        /*.token_count        = */ 0,
        /*.used_cells         = */ llama_get_kv_cache_used_cells(ctx),
        /*.max_contiguous     = */ 0,
        /*.max_contiguous_idx = */ -1,
        /*.cells              = */ nullptr,
        /*.cells_sequences    = */ nullptr,
    };
    return result;
}

void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
    if (view->cells != nullptr) {
        free(view->cells);
        view->cells = nullptr;
    }
    if (view->cells_sequences != nullptr) {
        free(view->cells_sequences);
        view->cells_sequences = nullptr;
    }
}

void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
    if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
        view->n_cells = int32_t(ctx->kv_self.size);
        void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
        view->cells = (struct llama_kv_cache_view_cell *)p;
        p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells);
        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
        view->cells_sequences = (llama_seq_id *)p;
    }

    const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
    llama_kv_cache_view_cell * c_curr = view->cells;
    llama_seq_id * cs_curr = view->cells_sequences;
    int32_t used_cells = 0;
    int32_t token_count = 0;
    int32_t curr_contig_idx = -1;
    uint32_t max_contig = 0;
    int32_t max_contig_idx = -1;

    for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_seq_max) {
        const size_t curr_size = kv_cells[i].seq_id.size();
        token_count += curr_size;
        c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;

        if (curr_size > 0) {
            if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
                max_contig = i - curr_contig_idx;
                max_contig_idx = curr_contig_idx;
            }
            curr_contig_idx = -1;
        } else if (curr_contig_idx < 0) {
            curr_contig_idx = i;
        }

        int seq_idx = 0;
        for (const llama_seq_id it : kv_cells[i].seq_id) {
            if (seq_idx >= view->n_seq_max) {
                break;
            }
            cs_curr[seq_idx] = it;
            seq_idx++;
        }
        if (seq_idx != 0) {
            used_cells++;
        }
        for (; seq_idx < view->n_seq_max; seq_idx++) {
            cs_curr[seq_idx] = -1;
        }
    }
    if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
        max_contig_idx = curr_contig_idx;
        max_contig = kv_cells.size() - curr_contig_idx;
    }
    view->max_contiguous = max_contig;
    view->max_contiguous_idx = max_contig_idx;
    view->token_count = token_count;
    view->used_cells = used_cells;
    if (uint32_t(used_cells) != ctx->kv_self.used) {
        LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
            __func__, ctx->kv_self.used, used_cells);
    }
}
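
// Usage sketch (illustrative only): inspecting KV cache occupancy via a view.
// The view owns its buffers and must be freed after use:
//
//     llama_kv_cache_view view = llama_kv_cache_view_init(ctx, /*n_seq_max =*/ 4);
//     llama_kv_cache_view_update(ctx, &view); // refresh after decodes of interest
//     printf("used cells: %d, tokens: %d, longest contiguous free run: %d\n",
//            view.used_cells, view.token_count, view.max_contiguous);
//     llama_kv_cache_view_free(&view);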

int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) {
    int result = 0;

    for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
        result += ctx->kv_self.cells[i].seq_id.size();
    }

    return result;
}

int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
    return ctx->kv_self.used;
}

void llama_kv_cache_clear(struct llama_context * ctx) {
    llama_kv_cache_clear(ctx->kv_self);
}

bool llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    return llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
}

void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }
    llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
}

void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
    llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
}

void llama_kv_cache_seq_add(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
    if (delta == 0) {
        return;
    }

    llama_kv_cache_seq_add(ctx->kv_self, seq_id, p0, p1, delta);
}

void llama_kv_cache_seq_div(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    if (d == 1) {
        return;
    }

    llama_kv_cache_seq_div(ctx->kv_self, seq_id, p0, p1, d);
}

llama_pos llama_kv_cache_seq_pos_max(struct llama_context * ctx, llama_seq_id seq_id) {
    return llama_kv_cache_seq_pos_max(ctx->kv_self, seq_id);
}

void llama_kv_cache_defrag(struct llama_context * ctx) {
    llama_kv_cache_defrag(ctx->kv_self);
}

void llama_kv_cache_update(struct llama_context * ctx) {
    llama_kv_cache_update_internal(*ctx);
}

// deprecated
size_t llama_get_state_size(const struct llama_context * ctx) {
    return llama_state_get_size(ctx);
}

// deprecated
size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
    return llama_state_get_data(ctx, dst);
}

// deprecated
size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
    return llama_state_set_data(ctx, src);
}

// deprecated
bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
}

// deprecated
bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    return llama_state_save_file(ctx, path_session, tokens, n_token_count);
}

// Returns the *maximum* size of the state
size_t llama_state_get_size(const struct llama_context * ctx) {
    const auto & cparams = ctx->cparams;
    const auto & hparams = ctx->model.hparams;

    // we don't know the size of the rng until we actually serialize it, so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size       = sizeof(size_t);
    const size_t s_rng            = LLAMA_MAX_RNG_STATE;
    const size_t s_n_outputs      = sizeof(size_t);
    // assume worst case for outputs although only currently set ones are serialized
    const size_t s_output_pos     = ctx->cparams.n_batch * sizeof(int32_t);
    const size_t s_logits_size    = sizeof(size_t);
    const size_t s_logits         = ctx->logits_size ? cparams.n_batch * hparams.n_vocab * sizeof(float) : 0;
    const size_t s_embedding_size = sizeof(size_t);
    const size_t s_embedding      = ctx->embd_size ? cparams.n_batch * hparams.n_embd * sizeof(float) : 0;
    const size_t s_kv_buf_size    = sizeof(size_t);
    const size_t s_kv_head        = sizeof(uint32_t);
    const size_t s_kv_size        = sizeof(uint32_t);
    const size_t s_kv_used        = sizeof(uint32_t);
    const size_t s_v_trans        = sizeof(uint32_t);
    const size_t s_kv             = ctx->kv_self.total_size();
    const size_t s_kv_cell        = sizeof(llama_pos) + sizeof(size_t) + cparams.n_seq_max*sizeof(llama_seq_id);
    const size_t s_kv_cells       = ctx->kv_self.size * s_kv_cell;

    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_n_outputs
        + s_output_pos
        + s_logits_size
        + s_logits
        + s_embedding_size
        + s_embedding
        + s_kv_buf_size
        + s_kv_head
        + s_kv_size
        + s_kv_used
        + s_v_trans
        + s_kv
        + s_kv_cells
    );

    // on session version change it is very likely that the state size has changed - so we need to update this function
    static_assert(LLAMA_SESSION_VERSION == 6, "So you just bumped the session version - good. But did you remember to update llama_state_get_size?");

    return s_total;
}
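
// Usage sketch (illustrative only): llama_state_get_size() is an upper bound,
// so callers typically allocate that much and keep only what
// llama_state_get_data() actually wrote:
//
//     std::vector<uint8_t> state(llama_state_get_size(ctx));
//     const size_t n_written = llama_state_get_data(ctx, state.data());
//     state.resize(n_written); // n_written <= the reserved maximum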

// llama_context_data
struct llama_data_context {
    virtual void write(const void * src, size_t size) = 0;
    virtual size_t get_size_written() = 0;
    virtual ~llama_data_context() = default;
};

struct llama_data_buffer_context : llama_data_context {
    uint8_t * ptr;
    size_t size_written = 0;

    llama_data_buffer_context(uint8_t * p) : ptr(p) {}

    void write(const void * src, size_t size) override {
        memcpy(ptr, src, size);
        ptr += size;
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

struct llama_data_file_context : llama_data_context {
    llama_file * file;
    size_t size_written = 0;

    llama_data_file_context(llama_file * f) : file(f) {}

    void write(const void * src, size_t size) override {
        file->write_raw(src, size);
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

/** copy state data into either a buffer or file depending on the passed in context
 *
 * file context:
 * llama_file file("/path", "wb");
 * llama_data_file_context data_ctx(&file);
 * llama_state_get_data_internal(ctx, &data_ctx);
 *
 * buffer context:
 * std::vector<uint8_t> buf(max_size, 0);
 * llama_data_buffer_context data_ctx(buf.data());
 * llama_state_get_data_internal(ctx, &data_ctx);
 *
*/
static void llama_state_get_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
    llama_synchronize(ctx);

    // copy rng
    {
        std::ostringstream rng_ss;
        rng_ss << ctx->rng;

        const std::string & rng_str  = rng_ss.str();
        const size_t        rng_size = rng_str.size();

        GGML_ASSERT(rng_size <= LLAMA_MAX_RNG_STATE);

        data_ctx->write(&rng_size,      sizeof(rng_size));
        data_ctx->write(rng_str.data(), rng_size);
    }

    // copy outputs
    {
        // Can't use ctx->n_outputs because it's not for the
        // entire last batch when n_ubatch is smaller than n_batch
        size_t n_outputs = 0;

        // copy output ids
        {
            std::vector<int32_t> output_pos;

            const size_t n_batch     = ctx->cparams.n_batch;
            const auto & output_ids  = ctx->output_ids;

            output_pos.resize(ctx->output_size);

            // build a more compact representation of the output ids
            for (size_t i = 0; i < n_batch; ++i) {
                // map an output id to a position in the batch
                int32_t pos = output_ids[i];
                if (pos >= 0) {
                    if ((size_t) pos >= n_outputs) {
                        n_outputs = pos + 1;
                    }
                    GGML_ASSERT((size_t) pos < ctx->output_size);
                    output_pos[pos] = i;
                }
            }

            data_ctx->write(&n_outputs, sizeof(n_outputs));

            if (n_outputs) {
                data_ctx->write(output_pos.data(), n_outputs * sizeof(int32_t));
            }
        }

        // copy logits
        {
            const size_t logits_size = std::min(ctx->logits_size, n_outputs * ctx->model.hparams.n_vocab);

            data_ctx->write(&logits_size, sizeof(logits_size));

            if (logits_size) {
                data_ctx->write(ctx->logits, logits_size * sizeof(float));
            }
        }

        // copy embeddings
        {
            const size_t embeddings_size = std::min(ctx->embd_size, n_outputs * ctx->model.hparams.n_embd);

            data_ctx->write(&embeddings_size, sizeof(embeddings_size));

            if (embeddings_size) {
                data_ctx->write(ctx->embd, embeddings_size * sizeof(float));
            }
        }
    }

    // copy kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;

        const uint32_t n_layer      = hparams.n_layer;
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();

        // NOTE: kv_size and kv_buf_size are mostly used for sanity checks
        const uint32_t kv_head     = llama_kv_cache_cell_max(kv_self);
        const uint32_t kv_size     = kv_self.size;
        const size_t   kv_buf_size = kv_self.total_size() / (kv_size ? kv_size : 1) * kv_head;
        const uint32_t kv_used     = kv_self.used;
        const uint32_t v_trans     = kv_self.v_trans ? 1 : 0;

        data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
        data_ctx->write(&kv_head,     sizeof(kv_head));
        data_ctx->write(&kv_size,     sizeof(kv_size));
        data_ctx->write(&kv_used,     sizeof(kv_used));
        data_ctx->write(&v_trans,     sizeof(v_trans));

        if (kv_buf_size) {
            const size_t pre_kv_buf_size = data_ctx->get_size_written();

            std::vector<uint8_t> tmp_buf;
            for (int il = 0; il < (int) n_layer; ++il) {
                const size_t k_size = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*kv_head);

                tmp_buf.resize(k_size);
                ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), 0, tmp_buf.size());
                data_ctx->write(tmp_buf.data(), tmp_buf.size());

                if (kv_self.recurrent || !kv_self.v_trans) {
                    // v is contiguous for recurrent models
                    // TODO: use other tensors for state models than k and v
                    const size_t v_size = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*kv_head);

                    tmp_buf.resize(v_size);
                    ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), 0, tmp_buf.size());
                    data_ctx->write(tmp_buf.data(), tmp_buf.size());
                    continue;
                }

                // v is not contiguous, copy row by row
                const size_t v_row_size   = ggml_row_size(kv_self.v_l[il]->type, kv_head);
                const size_t v_row_stride = ggml_row_size(kv_self.v_l[il]->type, kv_size);

                tmp_buf.resize(v_row_size);
                for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) {
                    ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), ir*v_row_stride, tmp_buf.size());
                    data_ctx->write(tmp_buf.data(), tmp_buf.size());
                }
            }
            GGML_ASSERT(kv_buf_size == data_ctx->get_size_written() - pre_kv_buf_size);
        }

        for (uint32_t i = 0; i < kv_head; ++i) {
            const auto & cell = kv_self.cells[i];

            const llama_pos pos         = cell.pos;
            const size_t    seq_id_size = cell.seq_id.size();

            data_ctx->write(&pos,         sizeof(pos));
            data_ctx->write(&seq_id_size, sizeof(seq_id_size));

            for (auto seq_id : cell.seq_id) {
                data_ctx->write(&seq_id, sizeof(seq_id));
            }
        }
    }
}

size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst) {
    llama_data_buffer_context data_ctx(dst);
    llama_state_get_data_internal(ctx, &data_ctx);

    return data_ctx.get_size_written();
}

// Sets the state reading from the specified source address
size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src) {
    llama_synchronize(ctx);

    const uint8_t * inp = src;

    // set rng
    {
        size_t rng_size;
        memcpy(&rng_size, inp, sizeof(rng_size)); inp += sizeof(rng_size);

        GGML_ASSERT(rng_size <= LLAMA_MAX_RNG_STATE);

        std::string rng_str((const char *)inp, rng_size); inp += rng_size;

        std::istringstream rng_ss(rng_str);
        rng_ss >> ctx->rng;

        GGML_ASSERT(!rng_ss.fail());
    }

    // set output ids
    {
        size_t n_outputs;
        std::vector<int32_t> output_pos;

        memcpy(&n_outputs, inp, sizeof(n_outputs)); inp += sizeof(n_outputs);

        GGML_ASSERT(n_outputs <= llama_output_reserve(*ctx, n_outputs));

        if (n_outputs) {
            output_pos.resize(n_outputs);
            memcpy(output_pos.data(), inp, n_outputs * sizeof(int32_t));
            inp += n_outputs * sizeof(int32_t);

            for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) {
                int32_t id = output_pos[i];
                GGML_ASSERT((uint32_t) id < ctx->cparams.n_batch);
                ctx->output_ids[id] = i;
            }

            ctx->n_outputs = n_outputs;
        }
    }

    // set logits
    {
        size_t logits_size;

        memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);

        GGML_ASSERT(ctx->logits_size >= logits_size);

        if (logits_size) {
            memcpy(ctx->logits, inp, logits_size * sizeof(float));
            inp += logits_size * sizeof(float);
        }
    }

    // set embeddings
    {
        size_t embeddings_size;

        memcpy(&embeddings_size, inp, sizeof(embeddings_size)); inp += sizeof(embeddings_size);

        GGML_ASSERT(ctx->embd_size >= embeddings_size);

        if (embeddings_size) {
            memcpy(ctx->embd, inp, embeddings_size * sizeof(float));
            inp += embeddings_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;

        const uint32_t n_layer      = hparams.n_layer;
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();

        size_t   kv_buf_size;
        uint32_t kv_head;
        uint32_t kv_size;
        uint32_t kv_used;
        uint32_t v_trans;

        memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
        memcpy(&kv_head,     inp, sizeof(kv_head));     inp += sizeof(kv_head);
        memcpy(&kv_size,     inp, sizeof(kv_size));     inp += sizeof(kv_size);
        memcpy(&kv_used,     inp, sizeof(kv_used));     inp += sizeof(kv_used);
        memcpy(&v_trans,     inp, sizeof(v_trans));     inp += sizeof(v_trans);

        GGML_ASSERT(kv_self.v_trans == (bool) v_trans); // incompatible V transposition

        if (kv_self.size != kv_size) {
            // the KV cache needs to be big enough to load all the KV cells from the saved state
            GGML_ASSERT(kv_self.size >= kv_head);

            LLAMA_LOG_INFO("%s: state contains %d KV cells, was saved with kv_size=%d, but is loaded with kv_size=%d (fine, but different)\n",
                __func__, kv_head, kv_size, kv_self.size);
        }

        llama_kv_cache_clear(ctx);

        if (kv_buf_size) {
            const size_t pre_kv_buf_size = inp - src;

            GGML_ASSERT(kv_self.total_size() >= kv_buf_size);

            for (int il = 0; il < (int) n_layer; ++il) {
                const size_t k_size = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*kv_head);

                ggml_backend_tensor_set(kv_self.k_l[il], inp, 0, k_size);
                inp += k_size;

                if (kv_self.recurrent || !kv_self.v_trans) {
                    // v is contiguous for recurrent models
                    // TODO: use other tensors for state models than k and v
                    const size_t v_size = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*kv_head);

                    ggml_backend_tensor_set(kv_self.v_l[il], inp, 0, v_size);
                    inp += v_size;
                    continue;
                }

                // v is not contiguous, copy row by row
                const size_t v_row_size   = ggml_row_size(kv_self.v_l[il]->type, kv_head);
                const size_t v_row_stride = ggml_row_size(kv_self.v_l[il]->type, kv_self.size);

                for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) {
                    ggml_backend_tensor_set(kv_self.v_l[il], inp, ir*v_row_stride, v_row_size);
                    inp += v_row_size;
                }
            }
            GGML_ASSERT(kv_buf_size == inp - src - pre_kv_buf_size);
        }

        ctx->kv_self.head = kv_head;
        ctx->kv_self.used = kv_used;

        for (uint32_t i = 0; i < kv_head; ++i) {
            llama_pos pos;
            size_t    seq_id_size;

            memcpy(&pos,         inp, sizeof(pos));         inp += sizeof(pos);
            memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);

            ctx->kv_self.cells[i].pos = pos;

            llama_seq_id seq_id;

            for (size_t j = 0; j < seq_id_size; ++j) {
                memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
                ctx->kv_self.cells[i].seq_id.insert(seq_id);
            }
        }
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_state_get_size(ctx);

    GGML_ASSERT(nread <= max_size);

    return nread;
}
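
// Usage sketch (illustrative only): snapshot/rollback of a whole context,
// e.g. to rewind generation to an earlier point:
//
//     std::vector<uint8_t> snapshot(llama_state_get_size(ctx));
//     snapshot.resize(llama_state_get_data(ctx, snapshot.data())); // take snapshot
//     // ... decode further tokens that may later be discarded ...
//     llama_state_set_data(ctx, snapshot.data());                  // roll back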

static bool llama_state_load_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_state_get_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_state_set_data(ctx, state_data.data());
    }

    return true;
}

bool llama_state_load_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_state_load_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
        return false;
    }
}

static bool llama_state_save_file_internal(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_state_get_data_internal(ctx, &data_ctx);

    return true;
}

bool llama_state_save_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    try {
        return llama_state_save_file_internal(ctx, path_session, tokens, n_token_count);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error saving session file: %s\n", err.what());
        return false;
    }
}
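
// Usage sketch (illustrative only): persisting a session to disk and
// restoring it, keeping the prompt tokens next to the context state.
// "session.bin" and n_ctx are assumptions:
//
//     llama_state_save_file(ctx, "session.bin", tokens.data(), tokens.size());
//
//     std::vector<llama_token> loaded(n_ctx); // capacity for the stored prompt
//     size_t n_loaded = 0;
//     if (llama_state_load_file(ctx, "session.bin", loaded.data(), loaded.size(), &n_loaded)) {
//         loaded.resize(n_loaded);
//     }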

size_t llama_state_seq_get_size(struct llama_context * ctx, llama_seq_id seq_id) {
    // save the size of size_t as a uint32_t for safety check
    const size_t size_t_size_size = sizeof(uint32_t);

    // other values
    const size_t s_cell_count_size  = sizeof(uint32_t);
    const size_t s_layer_count_size = sizeof(uint32_t);
    const size_t n_embd_v_gqa_size  = sizeof(uint32_t);

    size_t s_cell_count     = 0;
    size_t s_cell_data_size = 0;

    const auto & kv_self = ctx->kv_self;
    const auto & hparams = ctx->model.hparams;

    const uint32_t n_layer      = hparams.n_layer;
    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();

    for (uint32_t i = 0; i < kv_self.size; ++i) {
        const auto & cell = kv_self.cells[i];
        if (cell.seq_id.count(seq_id) > 0) {
            ++s_cell_count;
            s_cell_data_size += sizeof(llama_pos);
        }
    }

    for (int il = 0; il < (int)n_layer; ++il) {
        // types of keys and values
        s_cell_data_size += sizeof(int32_t) * 2;
        // k_size_row and v_size_el values of layer
        s_cell_data_size += sizeof(size_t) * 2;

        // keys
        const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
        s_cell_data_size += k_size_row * s_cell_count;

        // values (transposed)
        const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
        s_cell_data_size += v_size_el * s_cell_count * n_embd_v_gqa;
    }

    const size_t s_total = (
        size_t_size_size +
        s_cell_count_size +
        s_layer_count_size +
        n_embd_v_gqa_size +
        s_cell_data_size
    );

    return s_total;
}

static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_context & data_ctx, llama_seq_id seq_id) {
    llama_synchronize(ctx);

    const auto & kv_self = ctx->kv_self;
    GGML_ASSERT(!kv_self.recurrent); // not implemented

    // Save the size of size_t as a uint32_t for safety check
    const uint32_t size_t_size = sizeof(size_t);
    data_ctx.write(&size_t_size, sizeof(size_t_size));

    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
    uint32_t cell_count = 0;

    // Count the number of cells with the specified seq_id
    // Find all the ranges of cells with this seq id
    {
        uint32_t cell_range_begin = kv_self.size;
        for (uint32_t i = 0; i < kv_self.size; ++i) {
            const auto & cell = kv_self.cells[i];
            if (cell.has_seq_id(seq_id)) {
                ++cell_count;
                if (cell_range_begin == kv_self.size) {
                    cell_range_begin = i;
                }
            } else {
                if (cell_range_begin != kv_self.size) {
                    cell_ranges.emplace_back(cell_range_begin, i);
                    cell_range_begin = kv_self.size;
                }
            }
        }
        if (cell_range_begin != kv_self.size) {
            cell_ranges.emplace_back(cell_range_begin, kv_self.size);
        }

        // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
        uint32_t cell_count_check = 0;
        for (const auto & range : cell_ranges) {
            cell_count_check += range.second - range.first;
        }
        GGML_ASSERT(cell_count == cell_count_check);
    }

    // Write the cell count
    data_ctx.write(&cell_count, sizeof(cell_count));

    const auto & hparams = ctx->model.hparams;
    const uint32_t n_layer      = hparams.n_layer;
    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();

    // Write the layer count
    data_ctx.write(&n_layer, sizeof(n_layer));

    // Write n_embd_v_gqa
    data_ctx.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));

    // Iterate the ranges and write all the pos (this is the token position in the prompt)
    for (const auto & range : cell_ranges) {
        for (uint32_t i = range.first; i < range.second; ++i) {
            const auto & cell = kv_self.cells[i];
            data_ctx.write(&cell.pos, sizeof(cell.pos));
        }
    }

    // Iterate and write all the keys first, each row is a cell
    // Get whole range at a time
    std::vector<uint8_t> tmp_buf;
    for (int il = 0; il < (int)n_layer; ++il) {
        // Write key type
        const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
        data_ctx.write(&k_type_i, sizeof(k_type_i));

        // Write row size of key
        const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
        data_ctx.write(&k_size_row, sizeof(k_size_row));

        // Read each range of cells of k_size length each into tmp_buf and write out
        for (const auto & range : cell_ranges) {
            const size_t range_size = range.second - range.first;
            tmp_buf.resize(range_size * k_size_row);
            ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), range.first * k_size_row, range_size * k_size_row);
            data_ctx.write(tmp_buf.data(), tmp_buf.size());
        }
    }

    // TODO: simplify, reduce copy-paste
    if (!kv_self.v_trans) {
        for (int il = 0; il < (int)n_layer; ++il) {
            // Write value type
            const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
            data_ctx.write(&v_type_i, sizeof(v_type_i));

            // Write row size of value
            const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
            data_ctx.write(&v_size_row, sizeof(v_size_row));

            // Read each range of cells of v_size length each into tmp_buf and write out
            for (const auto & range : cell_ranges) {
                const size_t range_size = range.second - range.first;
                tmp_buf.resize(range_size * v_size_row);
                ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), range.first * v_size_row, range_size * v_size_row);
                data_ctx.write(tmp_buf.data(), tmp_buf.size());
            }
        }
    } else {
        // For the values, they are transposed, so we also need the element size and get the element ranges from each row
        const uint32_t kv_size = kv_self.size;
        for (int il = 0; il < (int)n_layer; ++il) {
            // Write value type
            const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
            data_ctx.write(&v_type_i, sizeof(v_type_i));

            // Write element size
            const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
            data_ctx.write(&v_size_el, sizeof(v_size_el));

            // For each row, we get the element values of each cell
            for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                // Read each range of cells of v_size_el length each into tmp_buf and write out
                for (const auto & range : cell_ranges) {
                    const size_t range_size = range.second - range.first;
                    const size_t src_offset = (range.first + j * kv_size) * v_size_el;
                    tmp_buf.resize(range_size * v_size_el);
                    ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), src_offset, tmp_buf.size());
                    data_ctx.write(tmp_buf.data(), tmp_buf.size());
                }
            }
        }
    }

    return data_ctx.get_size_written();
}

size_t llama_state_seq_get_data(struct llama_context * ctx, uint8_t * dst, llama_seq_id seq_id) {
    llama_data_buffer_context data_ctx(dst);
    return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
}

size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, llama_seq_id dest_seq_id) {
    llama_synchronize(ctx);

    auto & kv_self = ctx->kv_self;
    GGML_ASSERT(!kv_self.recurrent); // not implemented

    // Wipe the slot
    llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);

    const uint8_t * inp = src;

    // Read size of size_t
    uint32_t size_t_size;
    memcpy(&size_t_size, inp, sizeof(size_t_size));
    inp += sizeof(size_t_size);
    if (size_t_size != sizeof(size_t)) {
        LLAMA_LOG_ERROR("%s: size_t size mismatch\n", __func__);
        return 0;
    }

    // Read the cell count
    uint32_t cell_count;
    memcpy(&cell_count, inp, sizeof(cell_count));
    inp += sizeof(cell_count);

    // Read the layer count
    uint32_t n_layer_ref;
    memcpy(&n_layer_ref, inp, sizeof(n_layer_ref));
    inp += sizeof(n_layer_ref);

    // Read n_embd_v_gqa
    uint32_t n_embd_v_gqa_ref;
    memcpy(&n_embd_v_gqa_ref, inp, sizeof(n_embd_v_gqa_ref));
    inp += sizeof(n_embd_v_gqa_ref);

    // Sanity check model compatibility
    const auto & hparams = ctx->model.hparams;
    const uint32_t n_layer      = hparams.n_layer;
    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();
    if (n_layer != n_layer_ref) {
        LLAMA_LOG_ERROR("%s: mismatched n_layer (%d != %d)\n", __func__, n_layer, n_layer_ref);
        return 0;
    }
    if (n_embd_v_gqa != n_embd_v_gqa_ref) {
        LLAMA_LOG_ERROR("%s: mismatched n_embd_v_gqa (%d != %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref);
        return 0;
    }

    // Allocate the new cells for the slot
    if (cell_count) {
        llama_batch batch = llama_batch_init(cell_count, 0, 1);
        batch.n_tokens = cell_count;
        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            memcpy(&pos, inp, sizeof(pos));
            inp += sizeof(pos);

            batch.pos[i] = pos;
            batch.n_seq_id[i] = 1;
            batch.seq_id[i][0] = dest_seq_id;
        }
        if (!llama_kv_cache_find_slot(kv_self, batch)) {
            llama_batch_free(batch);
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return 0;
        }

        // DEBUG CHECK: kv_self.head should be our first cell, kv_self.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
        // Assume that this is one contiguous block of cells
        GGML_ASSERT(kv_self.head + cell_count <= kv_self.size);
        GGML_ASSERT(kv_self.cells[kv_self.head].pos == batch.pos[0]);
        GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].pos == batch.pos[cell_count - 1]);
        GGML_ASSERT(kv_self.cells[kv_self.head].has_seq_id(dest_seq_id));
        GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].has_seq_id(dest_seq_id));

        // Cleanup
        llama_batch_free(batch);
    }

    const uint32_t kv_size = kv_self.size;
    const uint32_t kv_head = kv_self.head;

    // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
    for (int il = 0; il < (int)n_layer; ++il) {
        // Read type of key
        int32_t k_type_i_ref;
        memcpy(&k_type_i_ref, inp, sizeof(k_type_i_ref));
        inp += sizeof(k_type_i_ref);
        const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
        if (k_type_i != k_type_i_ref) {
            llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
            LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
            return 0;
        }

        // Read row size of key
        size_t k_size_row_ref;
        memcpy(&k_size_row_ref, inp, sizeof(k_size_row_ref));
        inp += sizeof(k_size_row_ref);
        const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
        if (k_size_row != k_size_row_ref) {
            llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
            LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, k_size_row_ref, il);
            return 0;
        }

        if (cell_count) {
            // Read and set the keys for the whole cell range
            ggml_backend_tensor_set(kv_self.k_l[il], inp, kv_head * k_size_row, cell_count * k_size_row);
            inp += cell_count * k_size_row;
        }
    }

    // TODO: simplify, reduce copy-paste
    if (!kv_self.v_trans) {
        for (int il = 0; il < (int)n_layer; ++il) {
            // Read type of value
            int32_t v_type_i_ref;
            memcpy(&v_type_i_ref, inp, sizeof(v_type_i_ref));
            inp += sizeof(v_type_i_ref);
            const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
            if (v_type_i != v_type_i_ref) {
                llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return 0;
            }

            // Read row size of value
            size_t v_size_row_ref;
            memcpy(&v_size_row_ref, inp, sizeof(v_size_row_ref));
            inp += sizeof(v_size_row_ref);
            const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
            if (v_size_row != v_size_row_ref) {
                llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
                LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, v_size_row_ref, il);
                return 0;
            }

            if (cell_count) {
                // Read and set the values for the whole cell range
                ggml_backend_tensor_set(kv_self.v_l[il], inp, kv_head * v_size_row, cell_count * v_size_row);
                inp += cell_count * v_size_row;
            }
        }
    } else {
        // For each layer, read the values for each cell (transposed)
        for (int il = 0; il < (int)n_layer; ++il) {
            // Read type of value
            int32_t v_type_i_ref;
            memcpy(&v_type_i_ref, inp, sizeof(v_type_i_ref));
            inp += sizeof(v_type_i_ref);
            const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
            if (v_type_i != v_type_i_ref) {
                llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return 0;
            }

            // Read element size of value
            size_t v_size_el_ref;
            memcpy(&v_size_el_ref, inp, sizeof(v_size_el_ref));
            inp += sizeof(v_size_el_ref);
            const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
            if (v_size_el != v_size_el_ref) {
                llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
                LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, v_size_el_ref, il);
                return 0;
            }

            if (cell_count) {
                // For each row in the transposed matrix, read the values for the whole cell range
                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    const size_t dst_offset = (kv_head + j * kv_size) * v_size_el;
                    ggml_backend_tensor_set(kv_self.v_l[il], inp, dst_offset, cell_count * v_size_el);
                    inp += cell_count * v_size_el;
                }
            }
        }
    }

    const size_t nread = inp - src;
    return nread;
}
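
// Usage sketch (illustrative only): copying one sequence's KV state between
// two contexts of the same model, e.g. a prompt cache shared across slots.
// `src_ctx` and `dst_ctx` are hypothetical contexts:
//
//     const size_t n_max = llama_state_seq_get_size(src_ctx, /*seq_id =*/ 0);
//     std::vector<uint8_t> buf(n_max);
//     llama_state_seq_get_data(src_ctx, buf.data(), 0);
//     if (llama_state_seq_set_data(dst_ctx, buf.data(), /*dest_seq_id =*/ 1) == 0) {
//         // incompatible model or corrupt data, nothing was restored
//     }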

static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
    llama_file file(filepath, "wb");

    file.write_u32(LLAMA_STATE_SEQ_MAGIC);
    file.write_u32(LLAMA_STATE_SEQ_VERSION);

    // save the prompt
    file.write_u32((uint32_t)n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);

    const size_t res = file.tell();
    GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + data_ctx.get_size_written());
    return res;
}

static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(filepath, "rb");

    // version checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) {
            LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version);
            return 0;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return 0;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t state_size = file.size - file.tell();
        std::vector<uint8_t> state_data(state_size);
        file.read_raw(state_data.data(), state_size);
        const size_t nread = llama_state_seq_set_data(ctx, state_data.data(), dest_seq_id);
        if (!nread) {
            LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__);
            return 0;
        }
        GGML_ASSERT(nread <= state_size);
        GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell());
    }

    return file.tell();
}

size_t llama_state_seq_save_file(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
    try {
        return llama_state_seq_save_file_internal(ctx, filepath, seq_id, tokens, n_token_count);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error saving sequence state file: %s\n", err.what());
        return 0;
    }
}

size_t llama_state_seq_load_file(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_state_seq_load_file_internal(ctx, filepath, dest_seq_id, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading sequence state file: %s\n", err.what());
        return 0;
    }
}

void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch) {
    ctx->cparams.n_threads       = n_threads;
    ctx->cparams.n_threads_batch = n_threads_batch;
}

void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) {
    ctx->abort_callback      = abort_callback;
    ctx->abort_callback_data = abort_callback_data;
}

void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) {
    ctx->cparams.causal_attn = causal_attn;
}

struct llama_batch llama_batch_get_one(
        llama_token * tokens,
        int32_t       n_tokens,
        llama_pos     pos_0,
        llama_seq_id  seq_id) {
    return {
        /*n_tokens   =*/ n_tokens,
        /*tokens     =*/ tokens,
        /*embd       =*/ nullptr,
        /*pos        =*/ nullptr,
        /*n_seq_id   =*/ nullptr,
        /*seq_id     =*/ nullptr,
        /*logits     =*/ nullptr,
        /*all_pos_0  =*/ pos_0,
        /*all_pos_1  =*/ 1,
        /*all_seq_id =*/ seq_id,
    };
}

struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
    llama_batch batch = { 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, 0, 0, };

    if (embd) {
        batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
    } else {
        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
    }

    batch.pos      = (llama_pos *)     malloc(sizeof(llama_pos)      * n_tokens_alloc);
    batch.n_seq_id = (int32_t *)       malloc(sizeof(int32_t)        * n_tokens_alloc);
    batch.seq_id   = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
    for (int i = 0; i < n_tokens_alloc; ++i) {
        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
    }
    batch.seq_id[n_tokens_alloc] = nullptr; // sentinel so llama_batch_free knows where to stop

    batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens_alloc);

    return batch;
}

void llama_batch_free(struct llama_batch batch) {
    if (batch.token)    free(batch.token);
    if (batch.embd)     free(batch.embd);
    if (batch.pos)      free(batch.pos);
    if (batch.n_seq_id) free(batch.n_seq_id);
    if (batch.seq_id) {
        for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
            free(batch.seq_id[i]);
        }
        free(batch.seq_id);
    }
    if (batch.logits)   free(batch.logits);
}
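
// Usage sketch (illustrative only): building a one-sequence batch by hand and
// requesting logits only for the last token. `prompt_tokens` and `n_prompt`
// are assumptions:
//
//     llama_batch batch = llama_batch_init(/*n_tokens_alloc =*/ 512, /*embd =*/ 0, /*n_seq_max =*/ 1);
//     batch.n_tokens = n_prompt; // assumed n_prompt <= 512
//     for (int32_t i = 0; i < n_prompt; ++i) {
//         batch.token[i]     = prompt_tokens[i];
//         batch.pos[i]       = i;
//         batch.n_seq_id[i]  = 1;
//         batch.seq_id[i][0] = 0;
//         batch.logits[i]    = (i == n_prompt - 1) ? 1 : 0;
//     }
//     // ... llama_decode(ctx, batch) ...
//     llama_batch_free(batch);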

int32_t llama_decode(
        struct llama_context * ctx,
        struct llama_batch     batch) {
    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}
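
// Usage sketch (illustrative only): error handling around llama_decode. The
// negative path is logged above; the positive path relies on llama.cpp's
// convention (an assumption not visible in this excerpt) that a return of 1
// means no KV cache slot was found for the batch:
//
//     const int32_t ret = llama_decode(ctx, batch);
//     if (ret > 0) {
//         // assumed: no KV slot - free cache space (e.g. llama_kv_cache_seq_rm) and retry
//     } else if (ret < 0) {
//         // hard decode failure
//     }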

void llama_synchronize(struct llama_context * ctx) {
    ggml_backend_sched_synchronize(ctx->sched);

    // FIXME: if multiple single tokens are evaluated without a synchronization,
    //        the stats will be added to the prompt evaluation stats
    //        this should only happen when using batch size 1 to evaluate a batch

    // add the evaluation to the stats
    if (ctx->n_queued_tokens == 1) {
        ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us;
        ctx->n_eval++;
    } else if (ctx->n_queued_tokens > 1) {
        ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us;
        ctx->n_p_eval += ctx->n_queued_tokens;
    }

    // get a more accurate load time, upon first eval
    if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) {
        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
        ctx->has_evaluated_once = true;
    }

    ctx->n_queued_tokens = 0;
    ctx->t_compute_start_us = 0;
}

float * llama_get_logits(struct llama_context * ctx) {
    llama_synchronize(ctx);

    return ctx->logits;
}

float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
    int32_t j = -1;

    llama_synchronize(ctx);

    try {
        if (ctx->logits == nullptr) {
            throw std::runtime_error("no logits");
        }

        if (i < 0) {
            j = ctx->n_outputs + i;
            if (j < 0) {
                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
            }
        } else if ((size_t) i >= ctx->output_ids.size()) {
            throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size()));
        } else {
            j = ctx->output_ids[i];
        }

        if (j < 0) {
            throw std::runtime_error(format("batch.logits[%d] != true", i));
        }
        if (j >= ctx->n_outputs) {
            // This should not happen
            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
        }

        return ctx->logits + j*ctx->model.hparams.n_vocab;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
        GGML_ASSERT(false);
#endif
        return nullptr;
    }
}
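
// Usage sketch (illustrative only): greedy pick over the logits of the most
// recent output (negative indices count from the end of the outputs, so -1 is
// the last token that had batch.logits set):
//
//     const float * logits  = llama_get_logits_ith(ctx, -1);
//     const int32_t n_vocab = llama_n_vocab(llama_get_model(ctx));
//     llama_token best = 0;
//     for (int32_t t = 1; t < n_vocab; ++t) {
//         if (logits[t] > logits[best]) {
//             best = t;
//         }
//     }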

float * llama_get_embeddings(struct llama_context * ctx) {
    llama_synchronize(ctx);

    return ctx->embd;
}

float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
    int32_t j = -1;

    llama_synchronize(ctx);

    try {
        if (ctx->embd == nullptr) {
            throw std::runtime_error("no embeddings");
        }

        if (i < 0) {
            j = ctx->n_outputs + i;
            if (j < 0) {
                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
            }
        } else if ((size_t) i >= ctx->output_ids.size()) {
            throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size()));
        } else {
            j = ctx->output_ids[i];
        }

        if (j < 0) {
            throw std::runtime_error(format("batch.logits[%d] != true", i));
        }
        if (j >= ctx->n_outputs) {
            // This should not happen
            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
        }

        return ctx->embd + j*ctx->model.hparams.n_embd;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
        GGML_ASSERT(false);
#endif
        return nullptr;
    }
}

float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) {
    llama_synchronize(ctx);

    auto it = ctx->embd_seq.find(seq_id);
    if (it == ctx->embd_seq.end()) {
        return nullptr;
    }

    return it->second.data();
}

const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
    GGML_ASSERT(model->vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return model->vocab.id_to_token[token].text.c_str();
}

float llama_token_get_score(const struct llama_model * model, llama_token token) {
    GGML_ASSERT(model->vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return model->vocab.id_to_token[token].score;
}

llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token) {
    GGML_ASSERT(model->vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return model->vocab.id_to_token[token].type;
}

bool llama_token_is_eog(const struct llama_model * model, llama_token token) {
    return token != -1 && (
        token == llama_token_eos(model) ||
        token == llama_token_eot(model)
    );
}

llama_token llama_token_bos(const struct llama_model * model) {
    return model->vocab.special_bos_id;
}

llama_token llama_token_eos(const struct llama_model * model) {
    return model->vocab.special_eos_id;
}

llama_token llama_token_cls(const struct llama_model * model) {
    return model->vocab.special_cls_id;
}

llama_token llama_token_sep(const struct llama_model * model) {
    return model->vocab.special_sep_id;
}

llama_token llama_token_nl(const struct llama_model * model) {
    return model->vocab.linefeed_id;
}

int32_t llama_add_bos_token(const struct llama_model * model) {
    return model->vocab.special_add_bos;
}

int32_t llama_add_eos_token(const struct llama_model * model) {
    return model->vocab.special_add_eos;
}

llama_token llama_token_prefix(const struct llama_model * model) {
    return model->vocab.special_prefix_id;
}

llama_token llama_token_middle(const struct llama_model * model) {
    return model->vocab.special_middle_id;
}

llama_token llama_token_suffix(const struct llama_model * model) {
    return model->vocab.special_suffix_id;
}

llama_token llama_token_eot(const struct llama_model * model) {
    return model->vocab.special_eot_id;
}

int32_t llama_tokenize(
        const struct llama_model * model,
        const char *               text,
        int32_t                    text_len,
        llama_token *              tokens,
        int32_t                    n_tokens_max,
        bool                       add_special,
        bool                       parse_special) {
    auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_special, parse_special);

    if (n_tokens_max < (int) res.size()) {
        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}
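
// Usage sketch (illustrative only): a negative return value is the required
// token count, so the buffer can be resized and the call retried:
//
//     std::string text = "hello world";
//     std::vector<llama_token> toks(16);
//     int32_t n = llama_tokenize(model, text.c_str(), (int32_t) text.size(),
//                                toks.data(), (int32_t) toks.size(), true, false);
//     if (n < 0) {
//         toks.resize(-n);
//         n = llama_tokenize(model, text.c_str(), (int32_t) text.size(),
//                            toks.data(), (int32_t) toks.size(), true, false);
//     }
//     toks.resize(n);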

static std::string llama_decode_text(const std::string & text) {
    std::string decoded_text;

    const auto cpts = unicode_cpts_from_utf8(text);
    for (const auto cpt : cpts) {
        decoded_text += unicode_utf8_to_byte(unicode_cpt_to_utf8(cpt));
    }

    return decoded_text;
}

// does not write null-terminator to buf
int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length, bool special) {
    if (0 <= token && token < llama_n_vocab(model)) {
        switch (llama_vocab_get_type(model->vocab)) {
            case LLAMA_VOCAB_TYPE_WPM:
            case LLAMA_VOCAB_TYPE_SPM: {
                // NOTE: we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                if (llama_is_normal_token(model->vocab, token)) {
                    std::string result = model->vocab.id_to_token[token].text;
                    llama_unescape_whitespace(result);
                    if (length < (int) result.length()) {
                        return -(int) result.length();
                    }
                    memcpy(buf, result.c_str(), result.length());
                    return result.length();
                } else if (
                        (llama_is_user_defined_token(model->vocab, token)) ||
                        (llama_is_control_token     (model->vocab, token) && special)) {
                    std::string result = model->vocab.id_to_token[token].text;
                    if (length < (int) result.length()) {
                        return -(int) result.length();
                    }
                    memcpy(buf, result.c_str(), result.length());
                    return result.length();
                } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
                    if (length < 3) {
                        return -3;
                    }
                    memcpy(buf, "\xe2\x96\x85", 3);
                    return 3;
                } else if (llama_is_byte_token(model->vocab, token)) {
                    if (length < 1) {
                        return -1;
                    }
                    buf[0] = llama_token_to_byte(model->vocab, token);
                    return 1;
                }
                break;
            }
            case LLAMA_VOCAB_TYPE_BPE: {
                // NOTE: we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                if (llama_is_normal_token(model->vocab, token)) {
                    std::string result = model->vocab.id_to_token[token].text;
                    result = llama_decode_text(result);
                    if (length < (int) result.length()) {
                        return -(int) result.length();
                    }
                    memcpy(buf, result.c_str(), result.length());
                    return result.length();
                } else if (
                        (llama_is_user_defined_token(model->vocab, token)) ||
                        (llama_is_control_token     (model->vocab, token) && special)) {
                    std::string result = model->vocab.id_to_token[token].text;
                    if (length < (int) result.length()) {
                        return -(int) result.length();
                    }
                    memcpy(buf, result.c_str(), result.length());
                    return result.length();
                }
                break;
            }
            default:
                GGML_ASSERT(false);
        }
    }
    return 0;
}
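
// Usage sketch (illustrative only): detokenizing a sequence with the same
// negative-return convention as llama_tokenize — a negative result is the
// negated size the piece buffer would need:
//
//   std::string text;
//   for (llama_token tok : toks) {
//       char piece[64];
//       int32_t n = llama_token_to_piece(model, tok, piece, sizeof(piece), /*special=*/false);
//       if (n >= 0) {
//           text.append(piece, n); // n == 0 for suppressed/control tokens
//       }
//       // a production caller would retry with a larger buffer when n < 0
//   }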

// trim whitespace from the beginning and end of a string
static std::string trim(const std::string & str) {
    size_t start = 0;
    size_t end = str.size();
    // cast to unsigned char: passing a negative char (e.g. a UTF-8 byte on
    // platforms with signed char) to isspace is undefined behavior
    while (start < end && isspace(static_cast<unsigned char>(str[start]))) {
        start += 1;
    }
    while (end > start && isspace(static_cast<unsigned char>(str[end - 1]))) {
        end -= 1;
    }
    return str.substr(start, end - start);
}

// Simple version of "llama_apply_chat_template" that only works with strings
// This function uses heuristic checks to identify commonly used templates. It is not a jinja parser.
static int32_t llama_chat_apply_template_internal(
        const std::string & tmpl,
        const std::vector<const llama_chat_message *> & chat,
        std::string & dest, bool add_ass) {
    // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
    std::stringstream ss;
    if (tmpl == "chatml" || tmpl.find("<|im_start|>") != std::string::npos) {
        // chatml template
        for (auto message : chat) {
            ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
        }
        if (add_ass) {
            ss << "<|im_start|>assistant\n";
        }
    } else if (tmpl == "llama2" || tmpl.find("[INST]") != std::string::npos) {
        // llama2 template and its variants
        // [variant] support system message
        bool support_system_message = tmpl.find("<<SYS>>") != std::string::npos;
        // [variant] space before + after response
        bool space_around_response = tmpl.find("' ' + eos_token") != std::string::npos;
        // [variant] add BOS inside history
        bool add_bos_inside_history = tmpl.find("bos_token + '[INST]") != std::string::npos;
        // [variant] trim spaces from the input message
        bool strip_message = tmpl.find("content.strip()") != std::string::npos;
        // construct the prompt
        bool is_inside_turn = true; // skip BOS at the beginning
        ss << "[INST] ";
        for (auto message : chat) {
            std::string content = strip_message ? trim(message->content) : message->content;
            std::string role(message->role);
            if (!is_inside_turn) {
                is_inside_turn = true;
                ss << (add_bos_inside_history ? "<s>[INST] " : "[INST] ");
            }
            if (role == "system") {
                if (support_system_message) {
                    ss << "<<SYS>>\n" << content << "\n<</SYS>>\n\n";
                } else {
                    // if the model does not support a system message, we still include it in the first turn, but without the <<SYS>> markers
                    ss << content << "\n";
                }
            } else if (role == "user") {
                ss << content << " [/INST]";
            } else {
                ss << (space_around_response ? " " : "") << content << (space_around_response ? " " : "") << "</s>";
                is_inside_turn = false;
            }
        }
        // llama2 templates seem to not care about "add_generation_prompt"
    } else if (tmpl == "zephyr" || tmpl.find("<|user|>") != std::string::npos) {
        // zephyr template
        for (auto message : chat) {
            ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
        }
        if (add_ass) {
            ss << "<|assistant|>\n";
        }
    } else if (tmpl == "monarch" || tmpl.find("bos_token + message['role']") != std::string::npos) {
        // mlabonne/AlphaMonarch-7B template (the <s> is included inside history)
        for (auto message : chat) {
            std::string bos = (message == chat.front()) ? "" : "<s>"; // skip BOS for first message
            ss << bos << message->role << "\n" << message->content << "</s>\n";
        }
        if (add_ass) {
            ss << "<s>assistant\n";
        }
    } else if (tmpl == "gemma" || tmpl.find("<start_of_turn>") != std::string::npos) {
        // google/gemma-7b-it
        std::string system_prompt = "";
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // gemma has no system role, so we merge the system message into the next user prompt and nothing is lost
                system_prompt = trim(message->content);
                continue;
            }
            // in gemma, "assistant" is "model"
            role = role == "assistant" ? "model" : message->role;
            ss << "<start_of_turn>" << role << "\n";
            if (!system_prompt.empty() && role != "model") {
                ss << system_prompt << "\n\n";
                system_prompt = "";
            }
            ss << trim(message->content) << "<end_of_turn>\n";
        }
        if (add_ass) {
            ss << "<start_of_turn>model\n";
        }
    } else if (tmpl == "orion" || tmpl.find("'\\n\\nAssistant: ' + eos_token") != std::string::npos) {
        // OrionStarAI/Orion-14B-Chat
        std::string system_prompt = "";
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // there is no system message support, so we merge it into the next user prompt
                system_prompt = message->content;
                continue;
            } else if (role == "user") {
                ss << "Human: ";
                if (!system_prompt.empty()) {
                    ss << system_prompt << "\n\n";
                    system_prompt = "";
                }
                ss << message->content << "\n\nAssistant: </s>";
            } else {
                ss << message->content << "</s>";
            }
        }
    } else if (tmpl == "openchat" || tmpl.find("GPT4 Correct ") != std::string::npos) {
        // openchat/openchat-3.5-0106
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << message->content << "<|end_of_turn|>";
            } else {
                role[0] = toupper(role[0]);
                ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>";
            }
        }
        if (add_ass) {
            ss << "GPT4 Correct Assistant:";
        }
    } else if (tmpl == "vicuna" || tmpl == "vicuna-orca" || (tmpl.find("USER: ") != std::string::npos && tmpl.find("ASSISTANT: ") != std::string::npos)) {
        // eachadea/vicuna-13b-1.1 (and Orca variant)
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // Orca-Vicuna variant uses a system prefix
                if (tmpl == "vicuna-orca" || tmpl.find("SYSTEM: ") != std::string::npos) {
                    ss << "SYSTEM: " << message->content << "\n";
                } else {
                    ss << message->content << "\n\n";
                }
            } else if (role == "user") {
                ss << "USER: " << message->content << "\n";
            } else if (role == "assistant") {
                ss << "ASSISTANT: " << message->content << "</s>\n";
            }
        }
        if (add_ass) {
            ss << "ASSISTANT:";
        }
    } else if (tmpl == "deepseek" || (tmpl.find("### Instruction:") != std::string::npos && tmpl.find("<|EOT|>") != std::string::npos)) {
        // deepseek-ai/deepseek-coder-33b-instruct
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << message->content;
            } else if (role == "user") {
                ss << "### Instruction:\n" << message->content << "\n";
            } else if (role == "assistant") {
                ss << "### Response:\n" << message->content << "\n<|EOT|>\n";
            }
        }
        if (add_ass) {
            ss << "### Response:\n";
        }
    } else if (tmpl == "command-r" || (tmpl.find("<|START_OF_TURN_TOKEN|>") != std::string::npos && tmpl.find("<|USER_TOKEN|>") != std::string::npos)) {
        // CohereForAI/c4ai-command-r-plus
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            } else if (role == "user") {
                ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            } else if (role == "assistant") {
                ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            }
        }
        if (add_ass) {
            ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>";
        }
    } else if (tmpl == "llama3" || (tmpl.find("<|start_header_id|>") != std::string::npos && tmpl.find("<|end_header_id|>") != std::string::npos)) {
        // Llama 3
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>";
        }
        if (add_ass) {
            ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
        }
    } else if (tmpl == "phi3" || (tmpl.find("<|assistant|>") != std::string::npos && tmpl.find("<|end|>") != std::string::npos)) {
        // Phi 3
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|" << role << "|>\n" << trim(message->content) << "<|end|>\n";
        }
        if (add_ass) {
            ss << "<|assistant|>\n";
        }
    } else {
        // template not supported
        return -1;
    }
    dest = ss.str();
    return dest.size();
}
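
// Worked example (illustrative only): with the chatml branch above, a chat of
// {system: "You are helpful.", user: "Hi"} and add_ass == true is rendered as:
//
//   <|im_start|>system
//   You are helpful.<|im_end|>
//   <|im_start|>user
//   Hi<|im_end|>
//   <|im_start|>assistant
//
// The branch is selected either by the literal name "chatml" or because the
// model's jinja template contains the "<|im_start|>" marker.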

LLAMA_API int32_t llama_chat_apply_template(
        const struct llama_model * model,
        const char * tmpl,
        const struct llama_chat_message * chat,
        size_t n_msg,
        bool add_ass,
        char * buf,
        int32_t length) {
    std::string curr_tmpl(tmpl == nullptr ? "" : tmpl);
    if (tmpl == nullptr) {
        GGML_ASSERT(model != nullptr);

        // load template from model
        std::vector<char> model_template(2048, 0); // longest known template is about 1200 bytes
        std::string template_key = "tokenizer.chat_template";
        int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
        if (res < 0) {
            // worst case: there is no template information, so we fall back to chatml
            curr_tmpl = "chatml"; // see llama_chat_apply_template_internal
        } else {
            curr_tmpl = std::string(model_template.data(), model_template.size());
        }
    }

    // format the chat to string
    std::vector<const llama_chat_message *> chat_vec;
    chat_vec.resize(n_msg);
    for (size_t i = 0; i < n_msg; i++) {
        chat_vec[i] = &chat[i];
    }

    std::string formatted_chat;
    int32_t res = llama_chat_apply_template_internal(curr_tmpl, chat_vec, formatted_chat, add_ass);
    if (res < 0) {
        return res;
    }
    if (buf && length > 0) {
        strncpy(buf, formatted_chat.c_str(), length);
    }
    return res;
}
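
// Usage sketch (illustrative only): the function returns the full formatted
// length even when buf is null or too small, so the buffer can be sized in a
// second pass. Note the output is not null-terminated when truncated:
//
//   llama_chat_message msgs[] = {
//       { "system", "You are helpful." },
//       { "user",   "Hi" },
//   };
//   int32_t n = llama_chat_apply_template(model, nullptr, msgs, 2, true, nullptr, 0);
//   // assumes n >= 0, i.e. the model's template was recognized
//   std::vector<char> out(n + 1, 0);
//   llama_chat_apply_template(model, nullptr, msgs, 2, true, out.data(), (int32_t) out.size());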

LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count) {
    static const char * const SPLIT_PATH_FORMAT = "%s-%05d-of-%05d.gguf";
    if (snprintf(split_path, maxlen, SPLIT_PATH_FORMAT, path_prefix, split_no + 1, split_count)) {
        return strlen(split_path);
    }
    return 0;
}

int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int split_no, int split_count) {
    std::string str_split_path(split_path);
    char postfix[32];
    snprintf(postfix, 32, "-%05d-of-%05d.gguf", split_no + 1, split_count);
    std::string str_postfix(postfix);

    // check if split_path ends with postfix
    int size_prefix = str_split_path.size() - str_postfix.size();
    if (size_prefix > 0 && str_split_path.find(str_postfix, size_prefix) != std::string::npos) {
        snprintf(dest, std::min((size_t) size_prefix + 1, maxlen), "%s", split_path);
        return size_prefix;
    }

    return 0;
}
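
// Example (illustrative only): the two helpers are inverses of each other.
// llama_split_path(buf, sizeof(buf), "/models/ggml-model-q4_0", 1, 4) writes
// "/models/ggml-model-q4_0-00002-of-00004.gguf" (split_no is zero-based), and
// llama_split_prefix(buf, sizeof(buf), that_path, 1, 4) recovers
// "/models/ggml-model-q4_0" and returns its length.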

struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(0, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}

void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    LLAMA_LOG_INFO("\n");
    LLAMA_LOG_INFO("%s:        load time = %10.2f ms\n", __func__, timings.t_load_ms);
    LLAMA_LOG_INFO("%s:      sample time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    LLAMA_LOG_INFO("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    LLAMA_LOG_INFO("%s:       total time = %10.2f ms / %5d tokens\n", __func__, (timings.t_end_ms - timings.t_start_ms), (timings.n_p_eval + timings.n_eval));
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us  = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}
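
// Usage sketch (illustrative only): timing a single workload in isolation by
// resetting the counters before it and printing afterwards:
//
//   llama_reset_timings(ctx);
//   /* ... llama_decode() / sampling calls ... */
//   llama_print_timings(ctx); // per-token ms and tokens/s for this run only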

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX_VNNI = "    + std::to_string(ggml_cpu_has_avx_vnni())    + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "AVX512_BF16 = " + std::to_string(ggml_cpu_has_avx512_bf16()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
    s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
#ifdef GGML_USE_LLAMAFILE
    s += "LLAMAFILE = 1 | ";
#else
    s += "LLAMAFILE = 0 | ";
#endif

    return s.c_str();
}

void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
    fprintf(stream, "\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "# Timings #\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "\n");
    fprintf(stream, "mst_eval: %.2f  # ms / token during generation\n",
            1.0e-3 * ctx->t_eval_us / ctx->n_eval);
    fprintf(stream, "mst_p_eval: %.2f  # ms / token during prompt processing\n",
            1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
    fprintf(stream, "mst_sample: %.2f  # ms / token during sampling\n",
            1.0e-3 * ctx->t_sample_us / ctx->n_sample);
    fprintf(stream, "n_eval: %d  # number of tokens generated (excluding the first one)\n", ctx->n_eval);
    fprintf(stream, "n_p_eval: %d  # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
    fprintf(stream, "n_sample: %d  # number of sampled tokens\n", ctx->n_sample);
    fprintf(stream, "t_eval_us: %" PRId64 "  # total microseconds spent generating tokens\n", ctx->t_eval_us);
    fprintf(stream, "t_load_us: %" PRId64 "  # total microseconds spent loading the model\n", ctx->t_load_us);
    fprintf(stream, "t_p_eval_us: %" PRId64 "  # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
    fprintf(stream, "t_sample_us: %" PRId64 "  # total microseconds spent sampling\n", ctx->t_sample_us);
    fprintf(stream, "ts_eval: %.2f  # tokens / second during generation\n",
            1.0e6 * ctx->n_eval / ctx->t_eval_us);
    fprintf(stream, "ts_p_eval: %.2f  # tokens / second during prompt processing\n",
            1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
    fprintf(stream, "ts_sample: %.2f  # tokens / second during sampling\n",
            1.0e6 * ctx->n_sample / ctx->t_sample_us);
}

// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
) {
    return ctx->model.tensors_by_name;
}

void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
#ifdef GGML_USE_METAL
    ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#elif defined(GGML_USE_CUDA)
    ggml_backend_cuda_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#endif
}
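
// Usage sketch (illustrative only): routing llama.cpp logs through a custom
// sink, e.g. to keep only errors. Passing nullptr as the callback restores
// the default stderr handler:
//
//   static void my_log_cb(ggml_log_level level, const char * text, void * user_data) {
//       (void) user_data;
//       if (level == GGML_LOG_LEVEL_ERROR) {
//           fputs(text, stderr); // only surface errors
//       }
//   }
//   llama_log_set(my_log_cb, nullptr);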

static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        char * buffer2 = new char[len + 1];
        vsnprintf(buffer2, len + 1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}

static void llama_log_internal(ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}

static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}