llama.cpp — 754 KB, 14,308 lines
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
8575185761857718578185791858018581185821858318584185851858618587185881858918590185911859218593185941859518596185971859818599186001860118602186031860418605186061860718608186091861018611186121861318614186151861618617186181861918620186211862218623186241862518626186271862818629186301863118632186331863418635186361863718638186391864018641186421864318644186451864618647186481864918650186511865218653
#define LLAMA_API_INTERNAL
#include "llama.h"
#include "unicode.h"

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

#ifdef GGML_USE_RPC
# include "ggml-rpc.h"
#endif

#ifdef GGML_USE_CUDA
# include "ggml-cuda.h"
#elif defined(GGML_USE_VULKAN)
# include "ggml-vulkan.h"
#elif defined(GGML_USE_SYCL)
# include "ggml-sycl.h"
#elif defined(GGML_USE_KOMPUTE)
# include "ggml-kompute.h"
#endif

#ifdef GGML_USE_METAL
# include "ggml-metal.h"
#endif

// TODO: replace with ggml API call
#define QK_K 256

#ifdef __has_include
#if __has_include(<unistd.h>)
#include <unistd.h>
#if defined(_POSIX_MAPPED_FILES)
#include <sys/mman.h>
#include <fcntl.h>
#endif
#if defined(_POSIX_MEMLOCK_RANGE)
#include <sys/resource.h>
#endif
#endif
#endif

#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
#include <io.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cctype>
#include <cfloat>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <forward_list>
#include <fstream>
#include <functional>
#include <future>
#include <initializer_list>
#include <locale>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <queue>
#include <random>
#include <regex>
#include <set>
#include <sstream>
#include <thread>
#include <type_traits>
#include <unordered_map>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif

#define LLAMA_MAX_NODES 8192
#define LLAMA_MAX_EXPERTS 160

//
// logging
//

LLAMA_ATTRIBUTE_FORMAT(2, 3)
static void llama_log_internal (ggml_log_level level, const char * format, ...);
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...) llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...) llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)

//
// helpers
//

static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
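// Illustrative note (added, not part of the original source): the lookup table maps the
// high nibble of a UTF-8 lead byte to the sequence length, e.g. utf8_len('a') == 1
// (0x61 >> 4 == 0x6), while a lead byte of 0xF0 (high nibble 0xF) yields 4.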
static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    std::string result;
    for (size_t pos = 0; ; pos += search.length()) {
        auto new_pos = s.find(search, pos);
        if (new_pos == std::string::npos) {
            result += s.substr(pos, s.size() - pos);
            break;
        }
        result += s.substr(pos, new_pos - pos) + replace;
        pos = new_pos;
    }
    s = std::move(result);
}
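// Illustrative example (added, not part of the original source): given
// std::string s = "a_b_c"; replace_all(s, "_", "-"); leaves s == "a-b-c".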
static bool is_float_close(float a, float b, float abs_tol) {
    // Check for non-negative tolerance
    if (abs_tol < 0.0) {
        throw std::invalid_argument("Tolerance must be non-negative");
    }

    // Exact equality check
    if (a == b) {
        return true;
    }

    // Check for infinities
    if (std::isinf(a) || std::isinf(b)) {
        return false;
    }

    // Regular comparison using the provided absolute tolerance
    return std::fabs(b - a) <= abs_tol;
}
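// Illustrative example (added, not part of the original source):
// is_float_close(1.0f, 1.05f, 0.1f) returns true, while any comparison involving an
// infinity returns false unless the two values compare exactly equal.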
static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}

LLAMA_ATTRIBUTE_FORMAT(1, 2)
static std::string format(const char * fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}
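// Illustrative example (added, not part of the original source):
// format("%s.%s", "llama", "block_count") returns the std::string "llama.block_count";
// the first vsnprintf pass only measures the required size, the second writes into buf.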
//
// gguf constants (sync with gguf.py)
//

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GROK,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_REFACT,
    LLM_ARCH_BERT,
    LLM_ARCH_NOMIC_BERT,
    LLM_ARCH_JINA_BERT_V2,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_QWEN2,
    LLM_ARCH_QWEN2MOE,
    LLM_ARCH_PHI2,
    LLM_ARCH_PHI3,
    LLM_ARCH_PLAMO,
    LLM_ARCH_CODESHELL,
    LLM_ARCH_ORION,
    LLM_ARCH_INTERNLM2,
    LLM_ARCH_MINICPM,
    LLM_ARCH_GEMMA,
    LLM_ARCH_STARCODER2,
    LLM_ARCH_MAMBA,
    LLM_ARCH_XVERSE,
    LLM_ARCH_COMMAND_R,
    LLM_ARCH_DBRX,
    LLM_ARCH_OLMO,
    LLM_ARCH_ARCTIC,
    LLM_ARCH_DEEPSEEK2,
    LLM_ARCH_UNKNOWN,
};

static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA, "llama" },
    { LLM_ARCH_FALCON, "falcon" },
    { LLM_ARCH_GROK, "grok" },
    { LLM_ARCH_GPT2, "gpt2" },
    { LLM_ARCH_GPTJ, "gptj" },
    { LLM_ARCH_GPTNEOX, "gptneox" },
    { LLM_ARCH_MPT, "mpt" },
    { LLM_ARCH_BAICHUAN, "baichuan" },
    { LLM_ARCH_STARCODER, "starcoder" },
    { LLM_ARCH_REFACT, "refact" },
    { LLM_ARCH_BERT, "bert" },
    { LLM_ARCH_NOMIC_BERT, "nomic-bert" },
    { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" },
    { LLM_ARCH_BLOOM, "bloom" },
    { LLM_ARCH_STABLELM, "stablelm" },
    { LLM_ARCH_QWEN, "qwen" },
    { LLM_ARCH_QWEN2, "qwen2" },
    { LLM_ARCH_QWEN2MOE, "qwen2moe" },
    { LLM_ARCH_PHI2, "phi2" },
    { LLM_ARCH_PHI3, "phi3" },
    { LLM_ARCH_PLAMO, "plamo" },
    { LLM_ARCH_CODESHELL, "codeshell" },
    { LLM_ARCH_ORION, "orion" },
    { LLM_ARCH_INTERNLM2, "internlm2" },
    { LLM_ARCH_MINICPM, "minicpm" },
    { LLM_ARCH_GEMMA, "gemma" },
    { LLM_ARCH_STARCODER2, "starcoder2" },
    { LLM_ARCH_MAMBA, "mamba" },
    { LLM_ARCH_XVERSE, "xverse" },
    { LLM_ARCH_COMMAND_R, "command-r" },
    { LLM_ARCH_DBRX, "dbrx" },
    { LLM_ARCH_OLMO, "olmo" },
    { LLM_ARCH_ARCTIC, "arctic" },
    { LLM_ARCH_DEEPSEEK2, "deepseek2" },
    { LLM_ARCH_UNKNOWN, "(unknown)" },
};

enum llm_kv {
    LLM_KV_GENERAL_ARCHITECTURE,
    LLM_KV_GENERAL_QUANTIZATION_VERSION,
    LLM_KV_GENERAL_ALIGNMENT,
    LLM_KV_GENERAL_NAME,
    LLM_KV_GENERAL_AUTHOR,
    LLM_KV_GENERAL_VERSION,
    LLM_KV_GENERAL_URL,
    LLM_KV_GENERAL_DESCRIPTION,
    LLM_KV_GENERAL_LICENSE,
    LLM_KV_GENERAL_SOURCE_URL,
    LLM_KV_GENERAL_SOURCE_HF_REPO,

    LLM_KV_VOCAB_SIZE,
    LLM_KV_CONTEXT_LENGTH,
    LLM_KV_EMBEDDING_LENGTH,
    LLM_KV_BLOCK_COUNT,
    LLM_KV_LEADING_DENSE_BLOCK_COUNT,
    LLM_KV_FEED_FORWARD_LENGTH,
    LLM_KV_EXPERT_FEED_FORWARD_LENGTH,
    LLM_KV_USE_PARALLEL_RESIDUAL,
    LLM_KV_TENSOR_DATA_LAYOUT,
    LLM_KV_EXPERT_COUNT,
    LLM_KV_EXPERT_USED_COUNT,
    LLM_KV_EXPERT_SHARED_COUNT,
    LLM_KV_EXPERT_WEIGHTS_SCALE,
    LLM_KV_POOLING_TYPE,
    LLM_KV_LOGIT_SCALE,

    LLM_KV_ATTENTION_HEAD_COUNT,
    LLM_KV_ATTENTION_HEAD_COUNT_KV,
    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
    LLM_KV_ATTENTION_CLAMP_KQV,
    LLM_KV_ATTENTION_KEY_LENGTH,
    LLM_KV_ATTENTION_VALUE_LENGTH,
    LLM_KV_ATTENTION_LAYERNORM_EPS,
    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
    LLM_KV_ATTENTION_CAUSAL,
    LLM_KV_ATTENTION_Q_LORA_RANK,
    LLM_KV_ATTENTION_KV_LORA_RANK,

    LLM_KV_ROPE_DIMENSION_COUNT,
    LLM_KV_ROPE_FREQ_BASE,
    LLM_KV_ROPE_SCALE_LINEAR,
    LLM_KV_ROPE_SCALING_TYPE,
    LLM_KV_ROPE_SCALING_FACTOR,
    LLM_KV_ROPE_SCALING_ATTN_FACTOR,
    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
    LLM_KV_ROPE_SCALING_FINETUNED,
    LLM_KV_ROPE_SCALING_YARN_LOG_MUL,

    LLM_KV_SPLIT_NO,
    LLM_KV_SPLIT_COUNT,
    LLM_KV_SPLIT_TENSORS_COUNT,

    LLM_KV_SSM_INNER_SIZE,
    LLM_KV_SSM_CONV_KERNEL,
    LLM_KV_SSM_STATE_SIZE,
    LLM_KV_SSM_TIME_STEP_RANK,

    LLM_KV_TOKENIZER_MODEL,
    LLM_KV_TOKENIZER_PRE,
    LLM_KV_TOKENIZER_LIST,
    LLM_KV_TOKENIZER_TOKEN_TYPE,
    LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
    LLM_KV_TOKENIZER_SCORES,
    LLM_KV_TOKENIZER_MERGES,
    LLM_KV_TOKENIZER_BOS_ID,
    LLM_KV_TOKENIZER_EOS_ID,
    LLM_KV_TOKENIZER_UNK_ID,
    LLM_KV_TOKENIZER_SEP_ID,
    LLM_KV_TOKENIZER_PAD_ID,
    LLM_KV_TOKENIZER_CLS_ID,
    LLM_KV_TOKENIZER_MASK_ID,
    LLM_KV_TOKENIZER_ADD_BOS,
    LLM_KV_TOKENIZER_ADD_EOS,
    LLM_KV_TOKENIZER_ADD_PREFIX,
    LLM_KV_TOKENIZER_HF_JSON,
    LLM_KV_TOKENIZER_RWKV,
    LLM_KV_TOKENIZER_PREFIX_ID,
    LLM_KV_TOKENIZER_SUFFIX_ID,
    LLM_KV_TOKENIZER_MIDDLE_ID,
    LLM_KV_TOKENIZER_EOT_ID,
};

static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
    { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
    { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
    { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
    { LLM_KV_GENERAL_NAME, "general.name" },
    { LLM_KV_GENERAL_AUTHOR, "general.author" },
    { LLM_KV_GENERAL_VERSION, "general.version" },
    { LLM_KV_GENERAL_URL, "general.url" },
    { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
    { LLM_KV_GENERAL_LICENSE, "general.license" },
    { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
    { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },

    { LLM_KV_VOCAB_SIZE, "%s.vocab_size" },
    { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
    { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
    { LLM_KV_BLOCK_COUNT, "%s.block_count" },
    { LLM_KV_LEADING_DENSE_BLOCK_COUNT, "%s.leading_dense_block_count" },
    { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
    { LLM_KV_EXPERT_FEED_FORWARD_LENGTH, "%s.expert_feed_forward_length" },
    { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
    { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
    { LLM_KV_EXPERT_COUNT, "%s.expert_count" },
    { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" },
    { LLM_KV_EXPERT_SHARED_COUNT, "%s.expert_shared_count" },
    { LLM_KV_EXPERT_WEIGHTS_SCALE, "%s.expert_weights_scale" },
    { LLM_KV_POOLING_TYPE, "%s.pooling_type" },
    { LLM_KV_LOGIT_SCALE, "%s.logit_scale" },

    { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
    { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
    { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
    { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" },
    { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" },
    { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
    { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" },
    { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" },
    { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" },

    { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
    { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
    { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
    { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
    { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
    { LLM_KV_ROPE_SCALING_ATTN_FACTOR, "%s.rope.scaling.attn_factor" },
    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
    { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },
    { LLM_KV_ROPE_SCALING_YARN_LOG_MUL, "%s.rope.scaling.yarn_log_multiplier" },

    { LLM_KV_SPLIT_NO, "split.no" },
    { LLM_KV_SPLIT_COUNT, "split.count" },
    { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" },

    { LLM_KV_SSM_CONV_KERNEL, "%s.ssm.conv_kernel" },
    { LLM_KV_SSM_INNER_SIZE, "%s.ssm.inner_size" },
    { LLM_KV_SSM_STATE_SIZE, "%s.ssm.state_size" },
    { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },

    { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
    { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" },
    { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
    { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
    { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, "tokenizer.ggml.token_type_count" },
    { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
    { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
    { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
    { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
    { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
    { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
    { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
    { LLM_KV_TOKENIZER_CLS_ID, "tokenizer.ggml.cls_token_id" },
    { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" },
    { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
    { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
    { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" },
    { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
    { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
    { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" },
    { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" },
    { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
    { LLM_KV_TOKENIZER_EOT_ID, "tokenizer.ggml.eot_token_id" },
};

struct LLM_KV {
    LLM_KV(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    std::string operator()(llm_kv kv) const {
        return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
    }
};
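// Illustrative example (added, not part of the original source): most LLM_KV_NAMES
// entries are printf-style templates keyed by architecture, so
// LLM_KV(LLM_ARCH_LLAMA)(LLM_KV_CONTEXT_LENGTH) expands "%s.context_length"
// into "llama.context_length".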
enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_TOKEN_EMBD_NORM,
    LLM_TENSOR_TOKEN_TYPES,
    LLM_TENSOR_POS_EMBD,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_ROPE_FREQS,
    LLM_TENSOR_ROPE_FACTORS_LONG,
    LLM_TENSOR_ROPE_FACTORS_SHORT,
    LLM_TENSOR_ATTN_Q,
    LLM_TENSOR_ATTN_K,
    LLM_TENSOR_ATTN_V,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_NORM_2,
    LLM_TENSOR_ATTN_OUT_NORM,
    LLM_TENSOR_ATTN_ROT_EMBD,
    LLM_TENSOR_FFN_GATE_INP,
    LLM_TENSOR_FFN_GATE_INP_SHEXP,
    LLM_TENSOR_FFN_NORM,
    LLM_TENSOR_FFN_GATE,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
    LLM_TENSOR_FFN_ACT,
    LLM_TENSOR_FFN_DOWN_EXP, // split experts for backward compatibility
    LLM_TENSOR_FFN_GATE_EXP,
    LLM_TENSOR_FFN_UP_EXP,
    LLM_TENSOR_FFN_NORM_EXPS,
    LLM_TENSOR_FFN_DOWN_EXPS, // merged experts
    LLM_TENSOR_FFN_GATE_EXPS,
    LLM_TENSOR_FFN_UP_EXPS,
    LLM_TENSOR_FFN_DOWN_SHEXP,
    LLM_TENSOR_FFN_GATE_SHEXP,
    LLM_TENSOR_FFN_UP_SHEXP,
    LLM_TENSOR_ATTN_Q_NORM,
    LLM_TENSOR_ATTN_K_NORM,
    LLM_TENSOR_LAYER_OUT_NORM,
    LLM_TENSOR_SSM_IN,
    LLM_TENSOR_SSM_CONV1D,
    LLM_TENSOR_SSM_X,
    LLM_TENSOR_SSM_DT,
    LLM_TENSOR_SSM_A,
    LLM_TENSOR_SSM_D,
    LLM_TENSOR_SSM_OUT,
    LLM_TENSOR_ATTN_Q_A,
    LLM_TENSOR_ATTN_Q_B,
    LLM_TENSOR_ATTN_KV_A_MQA,
    LLM_TENSOR_ATTN_KV_B,
    LLM_TENSOR_ATTN_Q_A_NORM,
    LLM_TENSOR_ATTN_KV_A_NORM,
};
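// Illustrative note (added, not part of the original source): the per-architecture name
// templates below are additionally formatted with the layer index, e.g.
// ::format("blk.%d.attn_q", 2) yields the tensor name "blk.2.attn_q".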
static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
    {
        LLM_ARCH_LLAMA,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
        },
    },
    {
        LLM_ARCH_BAICHUAN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_FALCON,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_GROK,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
        },
    },
    {
        LLM_ARCH_GPT2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_GPTJ,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
        },
    },
    {
        LLM_ARCH_GPTNEOX,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_MPT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
        },
    },
    {
        LLM_ARCH_STARCODER,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_REFACT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_BERT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_TOKEN_TYPES, "token_types" },
            { LLM_TENSOR_POS_EMBD, "position_embd" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_NOMIC_BERT,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_TOKEN_TYPES, "token_types" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_JINA_BERT_V2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_TOKEN_TYPES, "token_types" },
            { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
            { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_BLOOM,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
        },
    },
    {
        LLM_ARCH_STABLELM,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
        },
    },
    {
        LLM_ARCH_QWEN,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_QWEN2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_QWEN2MOE,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
            { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
            { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
            { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
        },
    },
    {
        LLM_ARCH_PHI2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PHI3,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_PLAMO,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_CODESHELL,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_ORION,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_INTERNLM2,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
        },
    },
    {
        LLM_ARCH_MINICPM,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_OUTPUT, "output" },
            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
        },
    },
    {
        LLM_ARCH_GEMMA,
        {
            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  910. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  911. },
  912. },
  913. {
  914. LLM_ARCH_STARCODER2,
  915. {
  916. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  917. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  918. { LLM_TENSOR_OUTPUT, "output" },
  919. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  920. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  921. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  922. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  923. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  924. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  925. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  926. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  927. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  928. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  929. },
  930. },
  931. {
  932. LLM_ARCH_MAMBA,
  933. {
  934. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  935. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  936. { LLM_TENSOR_OUTPUT, "output" },
  937. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  938. { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
  939. { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
  940. { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" },
  941. { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
  942. { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
  943. { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
  944. { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
  945. },
  946. },
  947. {
  948. LLM_ARCH_XVERSE,
  949. {
  950. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  951. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  952. { LLM_TENSOR_OUTPUT, "output" },
  953. { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
  954. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  955. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  956. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  957. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  958. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  959. { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
  960. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  961. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  962. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  963. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  964. },
  965. },
  966. {
  967. LLM_ARCH_COMMAND_R,
  968. {
  969. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  970. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  971. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  972. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  973. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  974. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  975. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  976. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  977. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  978. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  979. { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
  980. { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
  981. },
  982. },
  983. {
  984. LLM_ARCH_DBRX,
  985. {
  986. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  987. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  988. { LLM_TENSOR_OUTPUT, "output" },
  989. { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
  990. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  991. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  992. { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
  993. { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
  994. { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
  995. { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
  996. { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
  997. },
  998. },
  999. {
  1000. LLM_ARCH_OLMO,
  1001. {
  1002. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  1003. { LLM_TENSOR_OUTPUT, "output" },
  1004. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  1005. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  1006. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  1007. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  1008. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  1009. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  1010. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  1011. },
  1012. },
  1013. {
  1014. LLM_ARCH_ARCTIC,
  1015. {
  1016. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  1017. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  1018. { LLM_TENSOR_OUTPUT, "output" },
  1019. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  1020. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  1021. { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
  1022. { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
  1023. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  1024. { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
  1025. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  1026. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  1027. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  1028. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  1029. { LLM_TENSOR_FFN_NORM_EXPS, "blk.%d.ffn_norm_exps" },
  1030. { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
  1031. { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
  1032. { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
  1033. },
  1034. },
  1035. {
  1036. LLM_ARCH_DEEPSEEK2,
  1037. {
  1038. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  1039. { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
  1040. { LLM_TENSOR_OUTPUT, "output" },
  1041. { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
  1042. { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" },
  1043. { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
  1044. { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
  1045. { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" },
  1046. { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
  1047. { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
  1048. { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
  1049. { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
  1050. { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
  1051. { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
  1052. { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
  1053. { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
  1054. { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
  1055. { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
  1056. { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
  1057. { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
  1058. { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
  1059. { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
  1060. { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
  1061. { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
  1062. },
  1063. },
  1064. {
  1065. LLM_ARCH_UNKNOWN,
  1066. {
  1067. { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
  1068. },
  1069. },
  1070. };
  1071. static llm_arch llm_arch_from_string(const std::string & name) {
  1072. for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
  1073. if (kv.second == name) {
  1074. return kv.first;
  1075. }
  1076. }
  1077. return LLM_ARCH_UNKNOWN;
  1078. }
  1079. // helper to handle gguf constants
  1080. // usage:
  1081. //
  1082. // const auto tn = LLM_TN(LLM_ARCH_LLAMA);
  1083. //
  1084. // std::string name = tn(LLM_TENSOR_OUTPUT); -> "output"
  1085. // std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias"
  1086. // std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
  1087. //
  1088. struct LLM_TN {
  1089. LLM_TN(llm_arch arch) : arch(arch) {}
  1090. llm_arch arch;
  1091. std::string operator()(llm_tensor tensor) const {
  1092. if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
  1093. return "__missing__";
  1094. }
  1095. return LLM_TENSOR_NAMES.at(arch).at(tensor);
  1096. }
  1097. std::string operator()(llm_tensor tensor, const std::string & suffix) const {
  1098. if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
  1099. return "__missing__";
  1100. }
  1101. return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
  1102. }
  1103. std::string operator()(llm_tensor tensor, int bid) const {
  1104. if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
  1105. return "__missing__";
  1106. }
  1107. return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
  1108. }
  1109. std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
  1110. if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
  1111. return "__missing__";
  1112. }
  1113. return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
  1114. }
  1115. std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
  1116. if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
  1117. return "__missing__";
  1118. }
  1119. return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
  1120. }
  1121. };
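// --- illustrative example (not part of upstream llama.cpp) -----------------
// A minimal sketch of how LLM_TN resolves the tables above into concrete gguf
// tensor names; the helper name llm_tn_example is hypothetical and the
// function is not referenced anywhere else.
static void llm_tn_example() {
    const auto tn = LLM_TN(LLM_ARCH_MINICPM);

    // per-model tensor:                     "output.weight"
    const std::string t0 = tn(LLM_TENSOR_OUTPUT, "weight");

    // per-block tensor, block 3:            "blk.3.attn_norm.weight"
    const std::string t1 = tn(LLM_TENSOR_ATTN_NORM, "weight", 3);

    // per-expert tensor, block 2, expert 5: "blk.2.ffn_gate.5.weight"
    const std::string t2 = tn(LLM_TENSOR_FFN_GATE_EXP, "weight", 2, 5);

    // a tensor the architecture does not define resolves to "__missing__"
    const std::string t3 = tn(LLM_TENSOR_SSM_IN, "weight", 0);

    LLAMA_LOG_INFO("%s: %s %s %s %s\n", __func__, t0.c_str(), t1.c_str(), t2.c_str(), t3.c_str());
}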
  1122. //
  1123. // gguf helpers
  1124. //
  1125. static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
  1126. { LLAMA_ROPE_SCALING_TYPE_NONE, "none" },
  1127. { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" },
  1128. { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" },
  1129. };
  1130. static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
  1131. for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
  1132. if (kv.second == name) {
  1133. return (llama_rope_scaling_type) kv.first;
  1134. }
  1135. }
  1136. return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
  1137. }
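// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of the string <-> enum round trip used when reading the rope-scaling
// type from gguf metadata; unknown strings fall back to UNSPECIFIED.
static void llama_rope_scaling_example() {
    GGML_ASSERT(llama_rope_scaling_type_from_string("yarn")   == LLAMA_ROPE_SCALING_TYPE_YARN);
    GGML_ASSERT(llama_rope_scaling_type_from_string("linear") == LLAMA_ROPE_SCALING_TYPE_LINEAR);
    GGML_ASSERT(llama_rope_scaling_type_from_string("bogus")  == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
}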
  1138. static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
  1139. switch (type) {
  1140. case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
  1141. case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
  1142. case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
  1143. case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
  1144. case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
  1145. case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
  1146. case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
  1147. case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
  1148. case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
  1149. case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
  1150. case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
  1151. default: return format("unknown type %d", type);
  1152. }
  1153. }
  1154. static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
  1155. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  1156. switch (type) {
  1157. case GGUF_TYPE_STRING:
  1158. return gguf_get_val_str(ctx_gguf, i);
  1159. case GGUF_TYPE_ARRAY:
  1160. {
  1161. const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
  1162. int arr_n = gguf_get_arr_n(ctx_gguf, i);
  1163. const void * data = gguf_get_arr_data(ctx_gguf, i);
  1164. std::stringstream ss;
  1165. ss << "[";
  1166. for (int j = 0; j < arr_n; j++) {
  1167. if (arr_type == GGUF_TYPE_STRING) {
  1168. std::string val = gguf_get_arr_str(ctx_gguf, i, j);
  1169. // escape quotes
  1170. replace_all(val, "\\", "\\\\");
  1171. replace_all(val, "\"", "\\\"");
  1172. ss << '"' << val << '"';
  1173. } else if (arr_type == GGUF_TYPE_ARRAY) {
  1174. ss << "???";
  1175. } else {
  1176. ss << gguf_data_to_str(arr_type, data, j);
  1177. }
  1178. if (j < arr_n - 1) {
  1179. ss << ", ";
  1180. }
  1181. }
  1182. ss << "]";
  1183. return ss.str();
  1184. }
  1185. default:
  1186. return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
  1187. }
  1188. }
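// --- illustrative example (not part of upstream llama.cpp) -----------------
// A minimal sketch of how the two helpers above can be combined to dump every
// metadata key/value pair of an already-loaded gguf context. The function name
// is hypothetical.
static void gguf_kv_dump_example(const struct gguf_context * ctx_gguf) {
    const int n_kv = gguf_get_n_kv(ctx_gguf);
    for (int i = 0; i < n_kv; i++) {
        const char *      key = gguf_get_key(ctx_gguf, i);
        const std::string val = gguf_kv_to_str(ctx_gguf, i);
        LLAMA_LOG_INFO("%s: %-40s = %s\n", __func__, key, val.c_str());
    }
}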
  1189. //
  1190. // llama helpers
  1191. //
  1192. #if defined(_WIN32)
  1193. static std::string llama_format_win_err(DWORD err) {
  1194. LPSTR buf;
  1195. size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
  1196. NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
  1197. if (!size) {
  1198. return "FormatMessageA failed";
  1199. }
  1200. std::string ret(buf, size);
  1201. LocalFree(buf);
  1202. return ret;
  1203. }
  1204. #endif
  1205. template <typename T>
  1206. struct no_init {
  1207. T value;
  1208. no_init() { /* do nothing */ }
  1209. };
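// --- illustrative example (not part of upstream llama.cpp) -----------------
// no_init<T> deliberately leaves `value` uninitialized, so that e.g.
// std::vector<no_init<uint8_t>>::resize() does not zero-fill a large buffer
// that is about to be overwritten by a file read anyway.
static void no_init_example() {
    std::vector<no_init<uint8_t>> buf;
    buf.resize(16u*1024*1024); // elements are intentionally left uninitialized
    // ... read file data over buf before using it ...
    GGML_UNUSED(buf);
}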
  1210. struct llama_file {
  1211. // use FILE * so we don't have to re-open the file to mmap
  1212. FILE * fp;
  1213. size_t size;
  1214. llama_file(const char * fname, const char * mode) {
  1215. fp = ggml_fopen(fname, mode);
  1216. if (fp == NULL) {
  1217. throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
  1218. }
  1219. seek(0, SEEK_END);
  1220. size = tell();
  1221. seek(0, SEEK_SET);
  1222. }
  1223. size_t tell() const {
  1224. #ifdef _WIN32
  1225. __int64 ret = _ftelli64(fp);
  1226. #else
  1227. long ret = std::ftell(fp);
  1228. #endif
  1229. GGML_ASSERT(ret != -1); // this really shouldn't fail
  1230. return (size_t) ret;
  1231. }
  1232. void seek(size_t offset, int whence) const {
  1233. #ifdef _WIN32
  1234. int ret = _fseeki64(fp, (__int64) offset, whence);
  1235. #else
  1236. int ret = std::fseek(fp, (long) offset, whence);
  1237. #endif
  1238. GGML_ASSERT(ret == 0); // same
  1239. }
  1240. void read_raw(void * ptr, size_t len) const {
  1241. if (len == 0) {
  1242. return;
  1243. }
  1244. errno = 0;
  1245. std::size_t ret = std::fread(ptr, len, 1, fp);
  1246. if (ferror(fp)) {
  1247. throw std::runtime_error(format("read error: %s", strerror(errno)));
  1248. }
  1249. if (ret != 1) {
  1250. throw std::runtime_error("unexpectedly reached end of file");
  1251. }
  1252. }
  1253. uint32_t read_u32() const {
  1254. uint32_t ret;
  1255. read_raw(&ret, sizeof(ret));
  1256. return ret;
  1257. }
  1258. void write_raw(const void * ptr, size_t len) const {
  1259. if (len == 0) {
  1260. return;
  1261. }
  1262. errno = 0;
  1263. size_t ret = std::fwrite(ptr, len, 1, fp);
  1264. if (ret != 1) {
  1265. throw std::runtime_error(format("write error: %s", strerror(errno)));
  1266. }
  1267. }
  1268. void write_u32(std::uint32_t val) const {
  1269. write_raw(&val, sizeof(val));
  1270. }
  1271. ~llama_file() {
  1272. if (fp) {
  1273. std::fclose(fp);
  1274. }
  1275. }
  1276. };
  1277. using llama_files = std::vector<std::unique_ptr<llama_file>>;
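// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of typical llama_file usage: open a model file, read the leading
// 32-bit magic, then rewind. Errors surface as std::runtime_error.
static void llama_file_example(const char * fname) {
    llama_file file(fname, "rb");
    if (file.size < sizeof(uint32_t)) {
        throw std::runtime_error(format("%s is too small to be a model file", fname));
    }
    const uint32_t magic = file.read_u32();
    file.seek(0, SEEK_SET);
    LLAMA_LOG_INFO("%s: size = %zu bytes, magic = 0x%08x\n", __func__, file.size, magic);
}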
  1278. struct llama_mmap {
  1279. void * addr;
  1280. size_t size;
  1281. llama_mmap(const llama_mmap &) = delete;
  1282. #ifdef _POSIX_MAPPED_FILES
  1283. static constexpr bool SUPPORTED = true;
  1284. // list of mapped fragments (first_offset, last_offset)
  1285. std::vector<std::pair<size_t, size_t>> mapped_fragments;
  1286. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
  1287. size = file->size;
  1288. int fd = fileno(file->fp);
  1289. int flags = MAP_SHARED;
  1290. // prefetch/readahead impairs performance on NUMA systems
  1291. if (numa) { prefetch = 0; }
  1292. #ifdef __linux__
  1293. // advise the kernel to read the file sequentially (increases readahead)
  1294. if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
  1295. LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
  1296. strerror(errno));
  1297. }
  1298. if (prefetch) { flags |= MAP_POPULATE; }
  1299. #endif
  1300. addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
  1301. if (addr == MAP_FAILED) { // NOLINT
  1302. throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
  1303. }
  1304. if (prefetch > 0) {
  1305. // advise the kernel to preload the mapped memory
  1306. if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
  1307. LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
  1308. strerror(errno));
  1309. }
  1310. }
  1311. if (numa) {
  1312. // advise the kernel not to use readahead
  1313. // (because the next page might not belong on the same node)
  1314. if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
  1315. LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
  1316. strerror(errno));
  1317. }
  1318. }
  1319. // initialize list of mapped_fragments
  1320. mapped_fragments.emplace_back(0, file->size);
  1321. }
  1322. static void align_range(size_t * first, size_t * last, size_t page_size) {
  1323. // align first to the next page
  1324. size_t offset_in_page = *first & (page_size - 1);
  1325. size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
  1326. *first += offset_to_page;
  1327. // align last to the previous page
  1328. *last = *last & ~(page_size - 1);
  1329. if (*last <= *first) {
  1330. *last = *first;
  1331. }
  1332. }
  1333. // partially unmap the file in the range [first, last)
  1334. void unmap_fragment(size_t first, size_t last) {
  1335. // note: this function must not be called multiple times with overlapping ranges
  1336. // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
  1337. int page_size = sysconf(_SC_PAGESIZE);
  1338. align_range(&first, &last, page_size);
  1339. size_t len = last - first;
  1340. if (len == 0) {
  1341. return;
  1342. }
  1343. GGML_ASSERT(first % page_size == 0);
  1344. GGML_ASSERT(last % page_size == 0);
  1345. GGML_ASSERT(last > first);
  1346. void * next_page_start = (uint8_t *) addr + first;
  1347. // unmap the range
  1348. if (munmap(next_page_start, len)) {
  1349. LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
  1350. }
  1351. // update the list of mapped fragments to avoid unmapping the same range again in the destructor
  1352. std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
  1353. for (const auto & frag : mapped_fragments) {
  1354. if (frag.first < first && frag.second > last) {
  1355. // the range is in the middle of the fragment, split it
  1356. new_mapped_fragments.emplace_back(frag.first, first);
  1357. new_mapped_fragments.emplace_back(last, frag.second);
  1358. } else if (frag.first < first && frag.second > first) {
  1359. // the range starts in the middle of the fragment
  1360. new_mapped_fragments.emplace_back(frag.first, first);
  1361. } else if (frag.first < last && frag.second > last) {
  1362. // the range ends in the middle of the fragment
  1363. new_mapped_fragments.emplace_back(last, frag.second);
  1364. } else if (frag.first >= first && frag.second <= last) {
  1365. // the range covers the entire fragment
  1366. } else {
  1367. // the range is outside the fragment
  1368. new_mapped_fragments.push_back(frag);
  1369. }
  1370. }
  1371. mapped_fragments = std::move(new_mapped_fragments);
  1372. }
  1373. ~llama_mmap() {
  1374. for (const auto & frag : mapped_fragments) {
  1375. if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
  1376. LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
  1377. }
  1378. }
  1379. }
  1380. #elif defined(_WIN32)
  1381. static constexpr bool SUPPORTED = true;
  1382. llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
  1383. GGML_UNUSED(numa);
  1384. size = file->size;
  1385. HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
  1386. HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
  1387. if (hMapping == NULL) {
  1388. DWORD error = GetLastError();
  1389. throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
  1390. }
  1391. addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
  1392. DWORD error = GetLastError();
  1393. CloseHandle(hMapping);
  1394. if (addr == NULL) {
  1395. throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
  1396. }
  1397. if (prefetch > 0) {
  1398. #if _WIN32_WINNT >= 0x602
  1399. // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
  1400. BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
  1401. HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
  1402. // may fail on pre-Windows 8 systems
  1403. pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
  1404. if (pPrefetchVirtualMemory) {
  1405. // advise the kernel to preload the mapped memory
  1406. WIN32_MEMORY_RANGE_ENTRY range;
  1407. range.VirtualAddress = addr;
  1408. range.NumberOfBytes = (SIZE_T) std::min(size, prefetch);
  1409. if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
  1410. LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
  1411. llama_format_win_err(GetLastError()).c_str());
  1412. }
  1413. }
  1414. #else
  1415. throw std::runtime_error("PrefetchVirtualMemory unavailable");
  1416. #endif
  1417. }
  1418. }
  1419. void unmap_fragment(size_t first, size_t last) {
  1420. // not supported
  1421. GGML_UNUSED(first);
  1422. GGML_UNUSED(last);
  1423. }
  1424. ~llama_mmap() {
  1425. if (!UnmapViewOfFile(addr)) {
  1426. LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
  1427. llama_format_win_err(GetLastError()).c_str());
  1428. }
  1429. }
  1430. #else
  1431. static constexpr bool SUPPORTED = false;
  1432. llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) {
  1433. GGML_UNUSED(file);
  1434. GGML_UNUSED(prefetch);
  1435. GGML_UNUSED(numa);
  1436. throw std::runtime_error("mmap not supported");
  1437. }
  1438. void unmap_fragment(size_t first, size_t last) {
  1439. GGML_UNUSED(first);
  1440. GGML_UNUSED(last);
  1441. throw std::runtime_error("mmap not supported");
  1442. }
  1443. #endif
  1444. };
  1445. using llama_mmaps = std::vector<std::unique_ptr<llama_mmap>>;
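// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of how a loader can map a file and later drop a region it no longer
// needs (e.g. tensor data already copied into a device buffer). Page alignment
// of the requested range is handled inside unmap_fragment; any remaining
// fragments are unmapped by the destructor.
static void llama_mmap_example(struct llama_file * file) {
    if (!llama_mmap::SUPPORTED) {
        LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
        return;
    }
    llama_mmap mapping(file, /* prefetch */ 0, /* numa */ false);
    LLAMA_LOG_INFO("%s: mapped %zu bytes at %p\n", __func__, mapping.size, mapping.addr);
    // release the first half of the mapping; the rest stays valid
    mapping.unmap_fragment(0, mapping.size/2);
}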
  1446. // Represents some region of memory being locked using mlock or VirtualLock;
  1447. // will automatically unlock on destruction.
  1448. struct llama_mlock {
  1449. void * addr = NULL;
  1450. size_t size = 0;
  1451. bool failed_already = false;
  1452. llama_mlock() {}
  1453. llama_mlock(const llama_mlock &) = delete;
  1454. ~llama_mlock() {
  1455. if (size) {
  1456. raw_unlock(addr, size);
  1457. }
  1458. }
  1459. void init(void * ptr) {
  1460. GGML_ASSERT(addr == NULL && size == 0); // NOLINT
  1461. addr = ptr;
  1462. }
  1463. void grow_to(size_t target_size) {
  1464. GGML_ASSERT(addr);
  1465. if (failed_already) {
  1466. return;
  1467. }
  1468. size_t granularity = lock_granularity();
  1469. target_size = (target_size + granularity - 1) & ~(granularity - 1);
  1470. if (target_size > size) {
  1471. if (raw_lock((uint8_t *) addr + size, target_size - size)) {
  1472. size = target_size;
  1473. } else {
  1474. failed_already = true;
  1475. }
  1476. }
  1477. }
  1478. #ifdef _POSIX_MEMLOCK_RANGE
  1479. static constexpr bool SUPPORTED = true;
  1480. static size_t lock_granularity() {
  1481. return (size_t) sysconf(_SC_PAGESIZE);
  1482. }
  1483. #ifdef __APPLE__
  1484. #define MLOCK_SUGGESTION \
  1485. "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
  1486. "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n"
  1487. #else
  1488. #define MLOCK_SUGGESTION \
  1489. "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n"
  1490. #endif
  1491. bool raw_lock(const void * addr, size_t size) const {
  1492. if (!mlock(addr, size)) {
  1493. return true;
  1494. }
1495. char * errmsg = std::strerror(errno);
  1496. bool suggest = (errno == ENOMEM);
  1497. // Check if the resource limit is fine after all
  1498. struct rlimit lock_limit;
  1499. if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
  1500. suggest = false;
  1501. }
  1502. if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
  1503. suggest = false;
  1504. }
  1505. LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
  1506. size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
  1507. return false;
  1508. }
  1509. #undef MLOCK_SUGGESTION
  1510. static void raw_unlock(void * addr, size_t size) {
  1511. if (munlock(addr, size)) {
  1512. LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno));
  1513. }
  1514. }
  1515. #elif defined(_WIN32)
  1516. static constexpr bool SUPPORTED = true;
  1517. static size_t lock_granularity() {
  1518. SYSTEM_INFO si;
  1519. GetSystemInfo(&si);
  1520. return (size_t) si.dwPageSize;
  1521. }
  1522. bool raw_lock(void * ptr, size_t len) const {
  1523. for (int tries = 1; ; tries++) {
  1524. if (VirtualLock(ptr, len)) {
  1525. return true;
  1526. }
  1527. if (tries == 2) {
  1528. LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
  1529. len, size, llama_format_win_err(GetLastError()).c_str());
  1530. return false;
  1531. }
  1532. // It failed but this was only the first try; increase the working
  1533. // set size and try again.
  1534. SIZE_T min_ws_size, max_ws_size;
  1535. if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
  1536. LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n",
  1537. llama_format_win_err(GetLastError()).c_str());
  1538. return false;
  1539. }
  1540. // Per MSDN: "The maximum number of pages that a process can lock
  1541. // is equal to the number of pages in its minimum working set minus
  1542. // a small overhead."
  1543. // Hopefully a megabyte is enough overhead:
  1544. size_t increment = len + 1048576;
  1545. // The minimum must be <= the maximum, so we need to increase both:
  1546. min_ws_size += increment;
  1547. max_ws_size += increment;
  1548. if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
  1549. LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n",
  1550. llama_format_win_err(GetLastError()).c_str());
  1551. return false;
  1552. }
  1553. }
  1554. }
  1555. static void raw_unlock(void * ptr, size_t len) {
  1556. if (!VirtualUnlock(ptr, len)) {
  1557. LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n",
  1558. llama_format_win_err(GetLastError()).c_str());
  1559. }
  1560. }
  1561. #else
  1562. static constexpr bool SUPPORTED = false;
  1563. static size_t lock_granularity() {
  1564. return (size_t) 65536;
  1565. }
  1566. bool raw_lock(const void * addr, size_t len) const {
  1567. LLAMA_LOG_WARN("warning: mlock not supported on this system\n");
  1568. return false;
  1569. }
  1570. static void raw_unlock(const void * addr, size_t len) {}
  1571. #endif
  1572. };
  1573. using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;
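// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of the intended llama_mlock pattern: bind the lock to a buffer once
// with init(), then grow the locked prefix as more data becomes resident.
// grow_to() rounds the target up to the lock granularity (page size) and
// remembers a failure so it does not retry on every call.
static void llama_mlock_example(void * buf, size_t n_bytes) {
    llama_mlock lock;
    lock.init(buf);
    lock.grow_to(n_bytes); // lock [buf, buf + n_bytes), rounded up to whole pages
    // ... use the buffer ...
    // unlocking happens automatically in ~llama_mlock()
}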
  1574. // NOTE: avoid ever using this except for building the token_to_piece caches
  1575. static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) {
  1576. std::vector<char> result(8, 0);
  1577. const int n_tokens = llama_token_to_piece(model, token, result.data(), result.size(), special);
  1578. if (n_tokens < 0) {
  1579. result.resize(-n_tokens);
  1580. int check = llama_token_to_piece(model, token, result.data(), result.size(), special);
  1581. GGML_ASSERT(check == -n_tokens);
  1582. }
  1583. else {
  1584. result.resize(n_tokens);
  1585. }
  1586. return std::string(result.data(), result.size());
  1587. }
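// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of how the helper above is meant to be used: pre-compute the piece
// for every token id once (n_vocab supplied by the caller) instead of calling
// the public API repeatedly during detokenization. The function name is
// hypothetical.
static std::vector<std::string> llama_build_piece_cache_example(const struct llama_model * model, int32_t n_vocab) {
    std::vector<std::string> cache(n_vocab);
    for (int32_t id = 0; id < n_vocab; ++id) {
        cache[id] = llama_token_to_piece(model, id, /* special */ true);
    }
    return cache;
}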
  1588. static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer) {
  1589. ggml_backend_buffer_type_t buft = nullptr;
  1590. #if defined(GGML_USE_CUDA)
  1591. // host buffers should only be used when data is expected to be copied to/from the GPU
  1592. if (host_buffer) {
  1593. buft = ggml_backend_cuda_host_buffer_type();
  1594. }
  1595. #elif defined(GGML_USE_SYCL)
  1596. if (host_buffer) {
  1597. buft = ggml_backend_sycl_host_buffer_type();
  1598. }
  1599. #elif defined(GGML_USE_CPU_HBM)
  1600. buft = ggml_backend_cpu_hbm_buffer_type();
  1601. #elif defined(GGML_USE_VULKAN)
  1602. if (host_buffer) {
  1603. buft = ggml_backend_vk_host_buffer_type();
  1604. }
  1605. #endif
  1606. if (buft == nullptr) {
  1607. buft = ggml_backend_cpu_buffer_type();
  1608. }
  1609. return buft;
  1610. GGML_UNUSED(host_buffer);
  1611. }
  1612. //
  1613. // globals
  1614. //
  1615. struct llama_state {
  1616. llama_state() {
  1617. #ifdef GGML_USE_METAL
  1618. ggml_backend_metal_log_set_callback(log_callback, log_callback_user_data);
  1619. #elif defined(GGML_USE_CUDA)
  1620. ggml_backend_cuda_log_set_callback(log_callback, log_callback_user_data);
  1621. #endif
  1622. }
  1623. // We save the log callback globally
  1624. ggml_log_callback log_callback = llama_log_callback_default;
  1625. void * log_callback_user_data = nullptr;
  1626. };
  1627. static llama_state g_state;
  1628. // available llama models
  1629. enum e_model {
  1630. MODEL_UNKNOWN,
  1631. MODEL_14M,
  1632. MODEL_17M,
  1633. MODEL_22M,
  1634. MODEL_33M,
  1635. MODEL_70M,
  1636. MODEL_109M,
  1637. MODEL_137M,
  1638. MODEL_160M,
  1639. MODEL_335M,
  1640. MODEL_410M,
  1641. MODEL_0_5B,
  1642. MODEL_1B,
  1643. MODEL_1_4B,
  1644. MODEL_2B,
  1645. MODEL_2_8B,
  1646. MODEL_3B,
  1647. MODEL_4B,
  1648. MODEL_6_9B,
  1649. MODEL_7B,
  1650. MODEL_8B,
  1651. MODEL_12B,
  1652. MODEL_13B,
  1653. MODEL_14B,
  1654. MODEL_15B,
  1655. MODEL_16B,
  1656. MODEL_20B,
  1657. MODEL_30B,
  1658. MODEL_34B,
  1659. MODEL_35B,
  1660. MODEL_40B,
  1661. MODEL_65B,
  1662. MODEL_70B,
  1663. MODEL_236B,
  1664. MODEL_314B,
  1665. MODEL_SMALL,
  1666. MODEL_MEDIUM,
  1667. MODEL_LARGE,
  1668. MODEL_XL,
  1669. MODEL_A2_7B,
  1670. MODEL_8x7B,
  1671. MODEL_8x22B,
  1672. MODEL_16x12B,
  1673. MODEL_10B_128x3_66B,
  1674. };
  1675. static const size_t kiB = 1024;
  1676. static const size_t MiB = 1024*kiB;
  1677. static const size_t GiB = 1024*MiB;
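// --- illustrative example (not part of upstream llama.cpp) -----------------
// These constants are used when reporting buffer sizes, e.g. converting a raw
// byte count into the MiB figures printed by the log lines further below.
static double llama_bytes_to_mib_example(size_t n_bytes) {
    return (double) n_bytes / MiB;
}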
  1678. struct llama_hparams {
  1679. bool vocab_only;
  1680. bool rope_finetuned;
  1681. bool use_par_res;
  1682. uint32_t n_vocab;
  1683. uint32_t n_ctx_train; // context size the model was trained on
  1684. uint32_t n_embd;
  1685. uint32_t n_head;
  1686. uint32_t n_head_kv;
  1687. uint32_t n_layer;
  1688. uint32_t n_rot;
  1689. uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
  1690. uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
  1691. uint32_t n_ff;
  1692. uint32_t n_expert = 0;
  1693. uint32_t n_expert_used = 0;
  1694. uint32_t n_vocab_type = 0; // for BERT-style token types
  1695. uint32_t n_layer_dense_lead = 0;
  1696. uint32_t n_lora_q = 0;
  1697. uint32_t n_lora_kv = 0;
  1698. uint32_t n_ff_exp = 0;
  1699. uint32_t n_expert_shared = 0;
  1700. float expert_weights_scale = 0.0;
  1701. float f_norm_eps;
  1702. float f_norm_rms_eps;
  1703. float rope_attn_factor = 1.0f;
  1704. float rope_freq_base_train;
  1705. float rope_freq_scale_train;
  1706. uint32_t n_ctx_orig_yarn;
  1707. float rope_yarn_log_mul;
  1708. // for State Space Models
  1709. uint32_t ssm_d_conv = 0;
  1710. uint32_t ssm_d_inner = 0;
  1711. uint32_t ssm_d_state = 0;
  1712. uint32_t ssm_dt_rank = 0;
  1713. float f_clamp_kqv = 0.0f;
  1714. float f_max_alibi_bias = 0.0f;
  1715. float f_logit_scale = 0.0f;
  1716. bool causal_attn = true;
  1717. bool use_alibi = false;
  1718. enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
  1719. enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
  1720. enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
  1721. bool operator!=(const llama_hparams & other) const {
  1722. if (this->vocab_only != other.vocab_only) return true;
  1723. if (this->n_vocab != other.n_vocab) return true;
  1724. if (this->n_ctx_train != other.n_ctx_train) return true;
  1725. if (this->n_embd != other.n_embd) return true;
  1726. if (this->n_head != other.n_head) return true;
  1727. if (this->n_head_kv != other.n_head_kv) return true;
  1728. if (this->n_layer != other.n_layer) return true;
  1729. if (this->n_rot != other.n_rot) return true;
  1730. if (this->n_embd_head_k != other.n_embd_head_k) return true;
  1731. if (this->n_embd_head_v != other.n_embd_head_v) return true;
  1732. if (this->n_ff != other.n_ff) return true;
  1733. if (this->n_expert != other.n_expert) return true;
  1734. if (this->n_expert_used != other.n_expert_used) return true;
  1735. if (this->n_layer_dense_lead != other.n_layer_dense_lead) return true;
  1736. if (this->n_lora_q != other.n_lora_q) return true;
  1737. if (this->n_lora_kv != other.n_lora_kv) return true;
  1738. if (this->n_ff_exp != other.n_ff_exp) return true;
  1739. if (this->n_expert_shared != other.n_expert_shared) return true;
  1740. if (this->rope_finetuned != other.rope_finetuned) return true;
  1741. if (this->n_ctx_orig_yarn != other.n_ctx_orig_yarn) return true;
  1742. if (this->ssm_d_conv != other.ssm_d_conv) return true;
  1743. if (this->ssm_d_inner != other.ssm_d_inner) return true;
  1744. if (this->ssm_d_state != other.ssm_d_state) return true;
  1745. if (this->ssm_dt_rank != other.ssm_dt_rank) return true;
  1746. const float EPSILON = 1e-9f;
  1747. if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true;
  1748. if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true;
  1749. if (!is_float_close(this->rope_attn_factor, other.rope_attn_factor, EPSILON)) return true;
  1750. if (!is_float_close(this->rope_freq_base_train, other.rope_freq_base_train, EPSILON)) return true;
  1751. if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;
  1752. if (!is_float_close(this->expert_weights_scale, other.expert_weights_scale, EPSILON)) return true;
  1753. if (!is_float_close(this->rope_yarn_log_mul, other.rope_yarn_log_mul, EPSILON)) return true;
  1754. return false;
  1755. }
  1756. uint32_t n_gqa() const {
  1757. if (n_head_kv == 0) {
  1758. return 0;
  1759. }
  1760. return n_head/n_head_kv;
  1761. }
  1762. uint32_t n_embd_k_gqa() const { // dimension of key embeddings across all k-v heads
  1763. return n_embd_head_k * n_head_kv;
  1764. }
  1765. uint32_t n_embd_v_gqa() const { // dimension of value embeddings across all k-v heads
  1766. return n_embd_head_v * n_head_kv;
  1767. }
  1768. uint32_t n_embd_k_s() const { // dimension of the rolling state embeddings
  1769. // corresponds to Mamba's conv_states size
1770. // TODO: maybe support convolution strides other than 1
  1771. // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
  1772. return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
  1773. }
  1774. uint32_t n_embd_v_s() const { // dimension of the recurrent state embeddings
  1775. // corresponds to Mamba's ssm_states size
  1776. return ssm_d_state * ssm_d_inner;
  1777. }
  1778. };
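// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of the grouped-query-attention (GQA) bookkeeping above for a
// hypothetical configuration: 32 query heads sharing 8 k/v heads, each head
// 128-dimensional.
static void llama_hparams_gqa_example() {
    llama_hparams hp = {};
    hp.n_head        = 32;
    hp.n_head_kv     = 8;
    hp.n_embd_head_k = 128;
    hp.n_embd_head_v = 128;

    GGML_ASSERT(hp.n_gqa()        == 4);    // 4 query heads per k/v head
    GGML_ASSERT(hp.n_embd_k_gqa() == 1024); // 8 * 128 key dims per token
    GGML_ASSERT(hp.n_embd_v_gqa() == 1024); // 8 * 128 value dims per token
}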
  1779. struct llama_cparams {
  1780. uint32_t n_ctx; // context size used during inference
  1781. uint32_t n_batch;
  1782. uint32_t n_ubatch;
  1783. uint32_t n_seq_max;
  1784. uint32_t n_threads; // number of threads to use for generation
  1785. uint32_t n_threads_batch; // number of threads to use for batch processing
  1786. float rope_freq_base;
  1787. float rope_freq_scale;
  1788. uint32_t n_ctx_orig_yarn;
  1789. // These hyperparameters are not exposed in GGUF, because all
  1790. // existing YaRN models use the same values for them.
  1791. float yarn_ext_factor;
  1792. float yarn_attn_factor;
  1793. float yarn_beta_fast;
  1794. float yarn_beta_slow;
  1795. float defrag_thold;
  1796. bool embeddings;
  1797. bool causal_attn;
  1798. bool offload_kqv;
  1799. bool flash_attn;
  1800. enum llama_pooling_type pooling_type;
  1801. ggml_backend_sched_eval_callback cb_eval;
  1802. void * cb_eval_user_data;
  1803. };
  1804. struct llama_layer {
  1805. // normalization
  1806. struct ggml_tensor * attn_norm;
  1807. struct ggml_tensor * attn_norm_b;
  1808. struct ggml_tensor * attn_norm_2;
  1809. struct ggml_tensor * attn_norm_2_b;
  1810. struct ggml_tensor * attn_q_norm;
  1811. struct ggml_tensor * attn_q_norm_b;
  1812. struct ggml_tensor * attn_k_norm;
  1813. struct ggml_tensor * attn_k_norm_b;
  1814. struct ggml_tensor * attn_out_norm;
  1815. struct ggml_tensor * attn_out_norm_b;
  1816. struct ggml_tensor * attn_q_a_norm;
  1817. struct ggml_tensor * attn_kv_a_norm;
  1818. // attention
  1819. struct ggml_tensor * wq;
  1820. struct ggml_tensor * wk;
  1821. struct ggml_tensor * wv;
  1822. struct ggml_tensor * wo;
  1823. struct ggml_tensor * wqkv;
  1824. struct ggml_tensor * wq_a;
  1825. struct ggml_tensor * wq_b;
  1826. struct ggml_tensor * wkv_a_mqa;
  1827. struct ggml_tensor * wkv_b;
  1828. // attention bias
  1829. struct ggml_tensor * bq;
  1830. struct ggml_tensor * bk;
  1831. struct ggml_tensor * bv;
  1832. struct ggml_tensor * bo;
  1833. struct ggml_tensor * bqkv;
  1834. // normalization
  1835. struct ggml_tensor * ffn_norm;
  1836. struct ggml_tensor * ffn_norm_b;
  1837. struct ggml_tensor * layer_out_norm;
  1838. struct ggml_tensor * layer_out_norm_b;
  1839. struct ggml_tensor * ffn_norm_exps;
  1840. // ff
  1841. struct ggml_tensor * ffn_gate; // w1
  1842. struct ggml_tensor * ffn_down; // w2
  1843. struct ggml_tensor * ffn_up; // w3
  1844. // ff MoE
  1845. struct ggml_tensor * ffn_gate_inp;
  1846. struct ggml_tensor * ffn_gate_exps;
  1847. struct ggml_tensor * ffn_down_exps;
1848. struct ggml_tensor * ffn_up_exps;
  1849. // ff shared expert (shexp)
  1850. struct ggml_tensor * ffn_gate_inp_shexp;
  1851. struct ggml_tensor * ffn_gate_shexp;
  1852. struct ggml_tensor * ffn_down_shexp;
  1853. struct ggml_tensor * ffn_up_shexp;
  1854. // ff bias
  1855. struct ggml_tensor * ffn_gate_b = nullptr;
  1856. struct ggml_tensor * ffn_down_b = nullptr; // b2
  1857. struct ggml_tensor * ffn_up_b = nullptr; // b3
  1858. struct ggml_tensor * ffn_act;
  1859. // mamba proj
  1860. struct ggml_tensor * ssm_in;
  1861. struct ggml_tensor * ssm_x;
  1862. struct ggml_tensor * ssm_dt;
  1863. struct ggml_tensor * ssm_out;
  1864. // mamba
  1865. struct ggml_tensor * ssm_conv1d;
  1866. struct ggml_tensor * ssm_a;
  1867. struct ggml_tensor * ssm_d;
  1868. // mamba bias
  1869. struct ggml_tensor * ssm_conv1d_b;
  1870. struct ggml_tensor * ssm_dt_b;
  1871. // long rope factors
  1872. struct ggml_tensor * rope_long = nullptr;
  1873. struct ggml_tensor * rope_short = nullptr;
  1874. };
  1875. struct llama_kv_cell {
  1876. llama_pos pos = -1;
  1877. llama_pos delta = 0;
  1878. int32_t src = 0; // used by recurrent state models to copy states
  1879. std::set<llama_seq_id> seq_id;
  1880. bool has_seq_id(const llama_seq_id & id) const {
  1881. return seq_id.find(id) != seq_id.end();
  1882. }
  1883. bool is_empty() const {
  1884. return seq_id.empty();
  1885. }
  1886. bool is_same_seq(const llama_kv_cell & other) const {
  1887. return seq_id == other.seq_id;
  1888. }
  1889. };
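// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of how a single cache cell can be shared between sequences, e.g. a
// common prompt prefix evaluated once and referenced by two parallel sequences.
static void llama_kv_cell_example() {
    llama_kv_cell cell;
    cell.pos = 0;          // first token of the shared prefix
    cell.seq_id.insert(0); // sequence 0 references this cell
    cell.seq_id.insert(1); // ... and so does sequence 1

    GGML_ASSERT( cell.has_seq_id(1));
    GGML_ASSERT(!cell.has_seq_id(2));
    GGML_ASSERT(!cell.is_empty());
}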
  1890. // ring-buffer of cached KV data
  1891. struct llama_kv_cache {
  1892. bool has_shift = false;
  1893. bool do_defrag = false;
  1894. bool do_copy = false;
  1895. bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
  1896. bool v_trans = true; // the value tensor is transposed
  1897. // Note: The value of head isn't only used to optimize searching
  1898. // for a free KV slot. llama_decode_internal also uses it, so it
  1899. // cannot be freely changed after a slot has been allocated.
  1900. uint32_t head = 0;
  1901. uint32_t size = 0;
  1902. uint32_t used = 0; // used cells (i.e. at least one seq_id)
  1903. // computed before each graph build
  1904. uint32_t n = 0;
  1905. ggml_type type_k = GGML_TYPE_F16;
  1906. ggml_type type_v = GGML_TYPE_F16;
  1907. std::vector<llama_kv_cell> cells;
  1908. std::vector<struct ggml_tensor *> k_l; // per layer
  1909. std::vector<struct ggml_tensor *> v_l;
  1910. std::vector<struct ggml_context *> ctxs;
  1911. std::vector<ggml_backend_buffer_t> bufs;
  1912. size_t total_size() const {
  1913. size_t size = 0;
  1914. for (ggml_backend_buffer_t buf : bufs) {
  1915. size += ggml_backend_buffer_get_size(buf);
  1916. }
  1917. return size;
  1918. }
  1919. ~llama_kv_cache() {
  1920. for (struct ggml_context * ctx : ctxs) {
  1921. ggml_free(ctx);
  1922. }
  1923. for (ggml_backend_buffer_t buf : bufs) {
  1924. ggml_backend_buffer_free(buf);
  1925. }
  1926. }
  1927. };
  1928. struct llama_control_vector {
  1929. std::vector<struct ggml_tensor *> tensors; // per layer
  1930. std::vector<struct ggml_context *> ctxs;
  1931. std::vector<ggml_backend_buffer_t> bufs;
  1932. int32_t layer_start = -1;
  1933. int32_t layer_end = -1;
  1934. ggml_tensor * tensor_for(int il) const {
  1935. if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
  1936. return nullptr;
  1937. }
  1938. return tensors[il];
  1939. }
  1940. ~llama_control_vector() {
  1941. for (struct ggml_context * ctx : ctxs) {
  1942. ggml_free(ctx);
  1943. }
  1944. for (ggml_backend_buffer_t buf : bufs) {
  1945. ggml_backend_buffer_free(buf);
  1946. }
  1947. }
  1948. };
  1949. struct llama_vocab {
  1950. using id = int32_t;
  1951. using token = std::string;
  1952. using tattr = llama_token_attr;
  1953. struct token_data {
  1954. token text;
  1955. float score;
  1956. tattr attr;
  1957. };
  1958. enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;
  1959. enum llama_vocab_pre_type type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
  1960. std::unordered_map<token, id> token_to_id;
  1961. std::vector<token_data> id_to_token;
  1962. std::vector<id> cache_special_tokens;
  1963. std::vector<token> cache_token_to_piece; // llama_token_to_piece(special = true);
  1964. std::map<std::pair<std::string, std::string>, int> bpe_ranks;
  1965. // default LLaMA special tokens
  1966. id special_bos_id = 1;
  1967. id special_eos_id = 2;
  1968. id special_unk_id = 0;
  1969. id special_sep_id = -1;
  1970. id special_pad_id = -1;
  1971. id special_cls_id = -1;
  1972. id special_mask_id = -1;
  1973. int special_add_bos = -1; // -1 unknown, 1 add, 0 don't add.
  1974. int special_add_eos = -1; // -1 unknown, 1 add, 0 don't add.
  1975. id linefeed_id = 13;
  1976. id special_prefix_id = -1;
  1977. id special_suffix_id = -1;
  1978. id special_middle_id = -1;
  1979. id special_eot_id = -1; // TODO: move above after "eos_id", and here add "file separator" token
  1980. bool add_space_prefix = true;
  1981. int find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
  1982. GGML_ASSERT(token_left.find(' ') == std::string::npos);
  1983. GGML_ASSERT(token_left.find('\n') == std::string::npos);
  1984. GGML_ASSERT(token_right.find(' ') == std::string::npos);
  1985. GGML_ASSERT(token_right.find('\n') == std::string::npos);
  1986. auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
  1987. if (it == bpe_ranks.end()) {
  1988. return -1;
  1989. }
  1990. return it->second;
  1991. }
  1992. };
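// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of how find_bpe_rank drives a greedy BPE merge step: among all
// adjacent symbol pairs, pick the one with the lowest (best) merge rank, where
// -1 means the pair is not a known merge. The helper name and the
// std::vector<std::string> symbol representation are illustrative only; the
// symbols are assumed to already be byte-encoded (no spaces or newlines).
static int llama_bpe_best_pair_example(const llama_vocab & vocab, const std::vector<std::string> & symbols) {
    int best_idx  = -1;
    int best_rank = -1;
    for (size_t i = 0; i + 1 < symbols.size(); ++i) {
        const int rank = vocab.find_bpe_rank(symbols[i], symbols[i + 1]);
        if (rank >= 0 && (best_rank < 0 || rank < best_rank)) {
            best_rank = rank;
            best_idx  = (int) i;
        }
    }
    return best_idx; // index of the pair to merge next, or -1 if none
}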
  1993. struct llama_model {
  1994. e_model type = MODEL_UNKNOWN;
  1995. llm_arch arch = LLM_ARCH_UNKNOWN;
  1996. llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
  1997. std::string name = "n/a";
  1998. llama_hparams hparams = {};
  1999. llama_vocab vocab;
  2000. struct ggml_tensor * tok_embd;
  2001. struct ggml_tensor * type_embd;
  2002. struct ggml_tensor * pos_embd;
  2003. struct ggml_tensor * tok_norm;
  2004. struct ggml_tensor * tok_norm_b;
  2005. struct ggml_tensor * output_norm;
  2006. struct ggml_tensor * output_norm_b;
  2007. struct ggml_tensor * output;
  2008. struct ggml_tensor * output_b;
  2009. std::vector<llama_layer> layers;
  2010. llama_split_mode split_mode;
  2011. int main_gpu;
  2012. int n_gpu_layers;
  2013. std::vector<std::string> rpc_servers;
  2014. // gguf metadata
  2015. std::unordered_map<std::string, std::string> gguf_kv;
  2016. // layer -> buffer type mapping
  2017. struct layer_buft {
  2018. layer_buft() : buft_matrix(nullptr), buft(nullptr) {}
  2019. layer_buft(ggml_backend_buffer_type_t matrix) : buft_matrix(matrix), buft(matrix) {}
  2020. layer_buft(ggml_backend_buffer_type_t matrix, ggml_backend_buffer_type_t other) : buft_matrix(matrix), buft(other) {}
  2021. ggml_backend_buffer_type_t buft_matrix; // matrices only - used by split buffers and backends that support only matrix multiplication
  2022. ggml_backend_buffer_type_t buft; // everything else
  2023. };
  2024. layer_buft buft_input;
  2025. layer_buft buft_output;
  2026. std::vector<layer_buft> buft_layer;
  2027. // contexts where the model tensors metadata is stored
  2028. std::vector<struct ggml_context *> ctxs;
  2029. // the model memory buffers for the tensor data
  2030. std::vector<ggml_backend_buffer_t> bufs;
  2031. // model memory mapped files
  2032. llama_mmaps mappings;
  2033. // objects representing data potentially being locked in memory
  2034. llama_mlocks mlock_bufs;
  2035. llama_mlocks mlock_mmaps;
  2036. // for quantize-stats only
  2037. std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
  2038. int64_t t_load_us = 0;
  2039. int64_t t_start_us = 0;
  2040. ~llama_model() {
  2041. for (struct ggml_context * ctx : ctxs) {
  2042. ggml_free(ctx);
  2043. }
  2044. for (ggml_backend_buffer_t buf : bufs) {
  2045. #ifdef GGML_USE_CUDA
  2046. if (ggml_backend_buffer_get_type(buf) == ggml_backend_cpu_buffer_type()) {
  2047. ggml_backend_cuda_unregister_host_buffer(ggml_backend_buffer_get_base(buf));
  2048. }
  2049. #endif
  2050. ggml_backend_buffer_free(buf);
  2051. }
  2052. }
  2053. };
  2054. struct llama_context {
  2055. llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
  2056. ~llama_context() {
  2057. ggml_backend_sched_free(sched);
  2058. for (ggml_backend_t backend : backends) {
  2059. ggml_backend_free(backend);
  2060. }
  2061. ggml_backend_buffer_free(buf_output);
  2062. }
  2063. llama_cparams cparams;
  2064. std::vector<ggml_backend_t> backends;
  2065. #ifdef GGML_USE_METAL
  2066. ggml_backend_t backend_metal = nullptr;
  2067. #endif
  2068. ggml_backend_t backend_cpu = nullptr;
  2069. const llama_model & model;
  2070. // key + value cache for the self attention
  2071. struct llama_kv_cache kv_self;
  2072. std::mt19937 rng;
  2073. bool has_evaluated_once = false;
  2074. int64_t t_start_us;
  2075. int64_t t_load_us;
  2076. int64_t t_sample_us = 0;
  2077. int64_t t_p_eval_us = 0;
  2078. int64_t t_eval_us = 0;
  2079. int64_t t_compute_start_us = 0;
  2080. int64_t n_queued_tokens = 0;
  2081. int32_t n_sample = 0; // number of tokens sampled
  2082. int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
  2083. int32_t n_eval = 0; // number of eval calls
  2084. // host buffer for the model output (logits and embeddings)
  2085. ggml_backend_buffer_t buf_output = nullptr;
  2086. // decode output (2-dimensional array: [n_outputs][n_vocab])
  2087. size_t logits_size = 0; // capacity (of floats) for logits
  2088. float * logits = nullptr;
  2089. std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
2090. size_t output_size = 0; // capacity (of token positions) for the output buffers
  2091. int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch
  2092. bool logits_all = false;
  2093. // embeddings output (2-dimensional array: [n_outputs][n_embd])
  2094. // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
  2095. size_t embd_size = 0; // capacity (of floats) for embeddings
  2096. float * embd = nullptr;
  2097. // sequence embeddings output (map of [n_embd] vectors)
  2098. // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
  2099. std::map<llama_seq_id, std::vector<float>> embd_seq;
  2100. // memory buffers used to evaluate the model
  2101. std::vector<uint8_t> buf_compute_meta;
  2102. ggml_backend_sched_t sched = nullptr;
  2103. ggml_abort_callback abort_callback = nullptr;
  2104. void * abort_callback_data = nullptr;
  2105. // input tensors
  2106. struct ggml_tensor * inp_tokens; // I32 [n_batch]
  2107. struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
  2108. struct ggml_tensor * inp_pos; // I32 [n_batch]
  2109. struct ggml_tensor * inp_out_ids; // I32 [n_outputs]
  2110. struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
  2111. struct ggml_tensor * inp_K_shift; // I32 [kv_size]
  2112. struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
  2113. struct ggml_tensor * inp_cls; // I32 [n_batch]
  2114. struct ggml_tensor * inp_s_copy; // I32 [kv_size]
  2115. struct ggml_tensor * inp_s_mask; // F32 [1, n_kv]
  2116. struct ggml_tensor * inp_s_seq; // I32 [n_kv, n_batch]
  2117. // control vectors
  2118. struct llama_control_vector cvec;
  2119. };
  2120. static size_t llama_get_device_count(const llama_model & model) {
  2121. size_t count = 1;
  2122. #if defined(GGML_USE_CUDA)
  2123. count = ggml_backend_cuda_get_device_count();
  2124. #elif defined(GGML_USE_SYCL)
  2125. count = ggml_backend_sycl_get_device_count();
  2126. #elif defined(GGML_USE_VULKAN)
  2127. count = ggml_backend_vk_get_device_count();
  2128. #endif
  2129. #if defined(GGML_USE_RPC)
  2130. count += model.rpc_servers.size();
  2131. #endif
  2132. return count;
  2133. GGML_UNUSED(model);
  2134. }
  2135. static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_model & model, int gpu) {
  2136. ggml_backend_buffer_type_t buft = nullptr;
  2137. #if defined(GGML_USE_RPC)
  2138. int dev_count = (int)llama_get_device_count(model);
  2139. int rpc_count = (int)model.rpc_servers.size();
  2140. if (gpu >= dev_count - rpc_count) {
  2141. const char * endpoint = model.rpc_servers[gpu - dev_count + rpc_count].c_str();
  2142. return ggml_backend_rpc_buffer_type(endpoint);
  2143. }
  2144. #endif
  2145. #if defined(GGML_USE_METAL)
  2146. buft = ggml_backend_metal_buffer_type();
  2147. #elif defined(GGML_USE_CUDA)
  2148. buft = ggml_backend_cuda_buffer_type(gpu);
  2149. #elif defined(GGML_USE_VULKAN)
  2150. buft = ggml_backend_vk_buffer_type(gpu);
  2151. #elif defined(GGML_USE_SYCL)
  2152. buft = ggml_backend_sycl_buffer_type(gpu);
  2153. #elif defined(GGML_USE_KOMPUTE)
  2154. buft = ggml_backend_kompute_buffer_type(gpu);
  2155. if (buft == nullptr) {
  2156. LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, gpu);
  2157. }
  2158. #endif
  2159. if (buft == nullptr) {
  2160. buft = llama_default_buffer_type_cpu(true);
  2161. }
  2162. return buft;
  2163. GGML_UNUSED(model);
  2164. GGML_UNUSED(gpu);
  2165. }
  2166. static ggml_backend_buffer_type_t llama_default_buffer_type_split(const llama_model & model, int fallback_gpu, const float * tensor_split) {
  2167. ggml_backend_buffer_type_t buft = nullptr;
  2168. #ifdef GGML_USE_CUDA
  2169. if (ggml_backend_cuda_get_device_count() > 1) {
  2170. buft = ggml_backend_cuda_split_buffer_type(tensor_split);
  2171. }
  2172. #endif
  2173. #ifdef GGML_USE_SYCL
  2174. if (ggml_backend_sycl_get_device_count() > 1) {
  2175. buft = ggml_backend_sycl_split_buffer_type(tensor_split);
  2176. }
  2177. #endif
  2178. if (buft == nullptr) {
  2179. buft = llama_default_buffer_type_offload(model, fallback_gpu);
  2180. }
  2181. return buft;
  2182. GGML_UNUSED(tensor_split);
  2183. }
  2184. static size_t llama_get_device_memory(const llama_model & model, int device) {
  2185. #if defined(GGML_USE_RPC)
  2186. int dev_count = (int)llama_get_device_count(model);
  2187. int rpc_count = (int)model.rpc_servers.size();
  2188. if (device >= dev_count - rpc_count) {
  2189. size_t total;
  2190. size_t free;
  2191. const char * endpoint = model.rpc_servers[device - dev_count + rpc_count].c_str();
  2192. ggml_backend_rpc_get_device_memory(endpoint, &free, &total);
  2193. return free;
  2194. }
  2195. #endif
  2196. #if defined(GGML_USE_CUDA)
  2197. size_t total;
  2198. size_t free;
  2199. ggml_backend_cuda_get_device_memory(device, &free, &total);
  2200. return free;
  2201. #elif defined(GGML_USE_SYCL)
  2202. size_t total;
  2203. size_t free;
  2204. ggml_backend_sycl_get_device_memory(device, &free, &total);
  2205. return free;
  2206. #elif defined(GGML_USE_VULKAN)
  2207. size_t total;
  2208. size_t free;
  2209. ggml_backend_vk_get_device_memory(device, &free, &total);
  2210. return free;
  2211. #else
  2212. return 1;
  2213. #endif
  2214. GGML_UNUSED(model);
  2215. GGML_UNUSED(device);
  2216. }
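// --- illustrative example (not part of upstream llama.cpp) -----------------
// Sketch of the device-index convention used by the helpers above when
// GGML_USE_RPC is enabled: local devices come first and RPC servers are
// appended, so device i is remote iff i >= dev_count - rpc_count.
static void llama_device_index_example(const llama_model & model) {
    const int dev_count = (int) llama_get_device_count(model);
    const int rpc_count = (int) model.rpc_servers.size();
    for (int i = 0; i < dev_count; ++i) {
        const bool is_rpc = i >= dev_count - rpc_count;
        LLAMA_LOG_INFO("%s: device %d: %s, ~%zu bytes free\n", __func__, i,
                is_rpc ? "rpc" : "local", llama_get_device_memory(model, i));
    }
}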
  2217. //
  2218. // kv cache helpers
  2219. //
  2220. static bool llama_kv_cache_init(
  2221. struct llama_kv_cache & cache,
  2222. const llama_context * ctx,
  2223. ggml_type type_k,
  2224. ggml_type type_v,
  2225. uint32_t kv_size,
  2226. bool offload) {
  2227. const llama_model & model = ctx->model;
  2228. const llama_cparams & cparams = ctx->cparams;
  2229. const struct llama_hparams & hparams = model.hparams;
  2230. const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
  2231. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();
  2232. const int64_t n_layer = hparams.n_layer;
  2233. cache.has_shift = false;
  2234. // TODO: find a nicer way to add other recurrent model architectures
  2235. cache.recurrent = model.arch == LLM_ARCH_MAMBA;
  2236. cache.v_trans = !cparams.flash_attn;
  2237. // TODO: support mixed recurrent Transformer architectures
  2238. // NOTE: (!a || b) is a logical implication (a -> b)
  2239. GGML_ASSERT(!cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_s());
  2240. GGML_ASSERT(!cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_s());
  2241. GGML_ASSERT( cache.recurrent || n_embd_k_gqa == hparams.n_embd_k_gqa());
  2242. GGML_ASSERT( cache.recurrent || n_embd_v_gqa == hparams.n_embd_v_gqa());
  2243. cache.head = 0;
  2244. cache.size = kv_size;
  2245. cache.used = 0;
  2246. cache.type_k = type_k;
  2247. cache.type_v = type_v;
  2248. cache.cells.clear();
  2249. cache.cells.resize(kv_size);
  2250. if (cache.recurrent) {
  2251. // init state copy sources
  2252. for (uint32_t i = 0; i < cache.size; ++i) {
  2253. cache.cells[i].src = i;
  2254. }
  2255. }
  2256. // count used buffer types
  2257. std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
  2258. if (offload) {
  2259. for (int64_t i = 0; i < n_layer; ++i) {
  2260. buft_layer_count[model.buft_layer[i].buft]++;
  2261. }
  2262. } else {
  2263. buft_layer_count[llama_default_buffer_type_cpu(true)] = n_layer;
  2264. }
  2265. // create a context for each buffer type
  2266. std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
  2267. for (auto & it : buft_layer_count) {
  2268. int n_layers = it.second;
  2269. struct ggml_init_params params = {
  2270. /*.mem_size =*/ 2u*n_layers*ggml_tensor_overhead(),
  2271. /*.mem_buffer =*/ NULL,
  2272. /*.no_alloc =*/ true,
  2273. };
  2274. ggml_context * ctx = ggml_init(params);
  2275. if (!ctx) {
  2276. LLAMA_LOG_ERROR("%s: failed to allocate context for kv cache\n", __func__);
  2277. return false;
  2278. }
  2279. ctx_map[it.first] = ctx;
  2280. cache.ctxs.push_back(ctx);
  2281. }
  2282. cache.k_l.reserve(n_layer);
  2283. cache.v_l.reserve(n_layer);
  2284. for (int i = 0; i < (int) n_layer; i++) {
  2285. struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
  2286. ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
  2287. ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
  2288. ggml_format_name(k, "cache_k_l%d", i);
  2289. ggml_format_name(v, "cache_v_l%d", i);
  2290. cache.k_l.push_back(k);
  2291. cache.v_l.push_back(v);
  2292. }
  2293. // allocate tensors and initialize the buffers to avoid NaNs in the padding
  2294. for (auto it : ctx_map) {
  2295. ggml_backend_buffer_type_t buft = it.first;
  2296. ggml_context * ctx = it.second;
  2297. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
  2298. if (!buf) {
  2299. LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__);
  2300. return false;
  2301. }
  2302. ggml_backend_buffer_clear(buf, 0);
  2303. LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
  2304. cache.bufs.push_back(buf);
  2305. }
  2306. return true;
  2307. }
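// NOTE: rough size estimate for the buffers allocated above (a sketch, assuming unquantized
// K/V cache types with the same element size):
//
//     bytes ≈ n_layer * kv_size * (n_embd_k_gqa + n_embd_v_gqa) * ggml_type_size(type_k)
//
// e.g. a 7B LLaMA-style model (n_layer = 32, n_embd_k_gqa = n_embd_v_gqa = 4096) with an
// F16 cache and kv_size = 4096 needs about 32 * 4096 * 8192 * 2 bytes ≈ 2 GiB.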
  2308. // find an empty slot of size "n_tokens" in the cache
  2309. // updates the cache head
  2310. // Note: On success, it's important that cache.head points
  2311. // to the first cell of the slot.
  2312. static bool llama_kv_cache_find_slot(
  2313. struct llama_kv_cache & cache,
  2314. const struct llama_batch & batch) {
  2315. const uint32_t n_tokens = batch.n_tokens;
  2316. if (cache.recurrent) {
  2317. // For recurrent state architectures (like Mamba),
  2318. // each KV cache cell can store the state for a whole sequence.
  2319. llama_seq_id min = cache.size - 1;
  2320. llama_seq_id max = 0;
  2321. for (uint32_t i = 0; i < n_tokens; ++i) {
  2322. for (int32_t j = 0; j < batch.n_seq_id[i]; ++j) {
  2323. llama_seq_id seq_id = batch.seq_id[i][j];
  2324. // make sure it's a valid seq_id
  2325. if ((uint32_t) seq_id < cache.size) {
  2326. if (seq_id > max) {
  2327. max = seq_id;
  2328. }
  2329. if (seq_id < min) {
  2330. min = seq_id;
  2331. }
  2332. // Assuming the tokens are in-order
  2333. if (batch.pos[i] != cache.cells[seq_id].pos + 1) {
  2334. // What should happen when the pos backtracks or skips a value?
  2335. // Clearing the state mid-batch would require special-casing which isn't done.
  2336. LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d\n",
  2337. __func__, batch.pos[i], cache.cells[seq_id].pos, seq_id);
  2338. }
  2339. if (cache.cells[seq_id].pos < 0 && 0 <= batch.pos[i]) {
  2340. cache.used += 1;
  2341. }
  2342. cache.cells[seq_id].pos = batch.pos[i];
  2343. // NOTE: seq_ids are not inserted here; they are handled when the input tensors are set
  2344. } else {
  2345. // too big seq_id
  2346. // TODO: would it be possible to resize the KV cache size instead?
2347. LLAMA_LOG_ERROR("%s: seq_id=%d >= kv_size=%d, try using a bigger --parallel value\n", __func__, seq_id, cache.size);
  2348. return false;
  2349. }
  2350. }
  2351. }
  2352. // allow getting the range of used cells, from head to head + n
  2353. cache.head = min;
  2354. cache.n = max - min + 1;
  2355. // sanity check
  2356. return max >= min;
  2357. }
  2358. // otherwise, one cell per token.
  2359. if (n_tokens > cache.size) {
  2360. LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size);
  2361. return false;
  2362. }
  2363. uint32_t n_tested = 0;
  2364. while (true) {
  2365. if (cache.head + n_tokens > cache.size) {
  2366. n_tested += cache.size - cache.head;
  2367. cache.head = 0;
  2368. continue;
  2369. }
  2370. bool found = true;
  2371. for (uint32_t i = 0; i < n_tokens; i++) {
  2372. if (cache.cells[cache.head + i].pos >= 0) {
  2373. found = false;
  2374. cache.head += i + 1;
  2375. n_tested += i + 1;
  2376. break;
  2377. }
  2378. }
  2379. if (found) {
  2380. break;
  2381. }
  2382. if (n_tested >= cache.size) {
  2383. //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
  2384. return false;
  2385. }
  2386. }
  2387. for (uint32_t i = 0; i < n_tokens; i++) {
  2388. cache.cells[cache.head + i].pos = batch.pos[i];
  2389. for (int32_t j = 0; j < batch.n_seq_id[i]; j++) {
  2390. cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i][j]);
  2391. }
  2392. }
  2393. cache.used += n_tokens;
  2394. return true;
  2395. }
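// NOTE: a small worked example of the search above: with cache.size = 8, cache.head = 6 and
// n_tokens = 3, the slot cannot fit at the end (6 + 3 > 8), so the search wraps to head = 0;
// if cells 0..2 are free, cache.head stays at 0, those cells receive the batch positions and
// seq_ids, and cache.used grows by 3.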
  2396. // find how many cells are currently in use
  2397. static uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
  2398. for (uint32_t i = cache.size; i > 0; --i) {
  2399. const llama_kv_cell & cell = cache.cells[i - 1];
  2400. if (cell.pos >= 0 && !cell.is_empty()) {
  2401. return i;
  2402. }
  2403. }
  2404. return 0;
  2405. }
  2406. static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
  2407. for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
  2408. cache.cells[i].pos = -1;
  2409. cache.cells[i].seq_id.clear();
  2410. }
  2411. cache.head = 0;
  2412. cache.used = 0;
  2413. for (auto & buf : cache.bufs) {
  2414. ggml_backend_buffer_clear(buf, 0);
  2415. }
  2416. }
  2417. static bool llama_kv_cache_seq_rm(
  2418. struct llama_kv_cache & cache,
  2419. llama_seq_id seq_id,
  2420. llama_pos p0,
  2421. llama_pos p1) {
  2422. uint32_t new_head = cache.size;
  2423. if (p0 < 0) p0 = 0;
  2424. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  2425. // models like Mamba can't have a state partially erased
  2426. if (cache.recurrent) {
  2427. if (seq_id >= (int64_t) cache.size) {
  2428. // could be fatal
  2429. return false;
  2430. }
  2431. if (0 <= seq_id) {
  2432. // partial intersection is invalid
  2433. if ((0 < p0 && p0 <= cache.cells[seq_id].pos) || (0 < p1 && p1 <= cache.cells[seq_id].pos)) {
  2434. return false;
  2435. }
  2436. } else {
2437. // if seq_id is negative, the range must include everything or nothing
  2438. if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
  2439. return false;
  2440. }
  2441. }
  2442. }
  2443. for (uint32_t i = 0; i < cache.size; ++i) {
  2444. if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  2445. if (seq_id < 0) {
  2446. cache.cells[i].seq_id.clear();
  2447. } else if (cache.cells[i].has_seq_id(seq_id)) {
  2448. cache.cells[i].seq_id.erase(seq_id);
  2449. } else {
  2450. continue;
  2451. }
  2452. if (cache.cells[i].is_empty()) {
  2453. // keep count of the number of used cells
  2454. if (cache.cells[i].pos >= 0) cache.used--;
  2455. cache.cells[i].pos = -1;
  2456. if (new_head == cache.size) new_head = i;
  2457. }
  2458. }
  2459. }
  2460. // If we freed up a slot, set head to it so searching can start there.
  2461. if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
  2462. return true;
  2463. }
  2464. static void llama_kv_cache_seq_cp(
  2465. struct llama_kv_cache & cache,
  2466. llama_seq_id seq_id_src,
  2467. llama_seq_id seq_id_dst,
  2468. llama_pos p0,
  2469. llama_pos p1) {
  2470. if (p0 < 0) p0 = 0;
  2471. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  2472. if (cache.recurrent) {
  2473. if ((uint32_t) seq_id_dst < cache.size && (uint32_t) seq_id_src < cache.size) {
  2474. seq_id_src = cache.cells[seq_id_src].src;
  2475. GGML_ASSERT((uint32_t) seq_id_src < cache.size);
  2476. // intent to "copy from"
  2477. // supports copy chains thanks to taking the source of the source
  2478. cache.cells[seq_id_dst].src = seq_id_src;
  2479. // preserve the "keep or clear" status of the copied sequence
  2480. if (cache.cells[seq_id_src].has_seq_id(seq_id_src)) {
  2481. cache.cells[seq_id_dst].seq_id.insert(seq_id_dst);
  2482. } else {
  2483. cache.cells[seq_id_dst].seq_id.erase(seq_id_dst);
  2484. }
  2485. cache.do_copy = true;
  2486. cache.cells[seq_id_dst].pos = cache.cells[seq_id_src].pos;
  2487. }
  2488. return;
  2489. }
  2490. // otherwise, this is the KV cache of a Transformer-like model
  2491. cache.head = 0;
  2492. for (uint32_t i = 0; i < cache.size; ++i) {
  2493. if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  2494. cache.cells[i].seq_id.insert(seq_id_dst);
  2495. }
  2496. }
  2497. }
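// NOTE: for the non-recurrent path above, "copying" a sequence is metadata-only: the
// destination seq_id is inserted into the same cells, so both sequences share the K/V data
// until one of them is removed or extended independently.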
  2498. static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
  2499. uint32_t new_head = cache.size;
  2500. for (uint32_t i = 0; i < cache.size; ++i) {
  2501. if (!cache.cells[i].has_seq_id(seq_id)) {
  2502. if (cache.cells[i].pos >= 0) cache.used--;
  2503. cache.cells[i].pos = -1;
  2504. cache.cells[i].seq_id.clear();
  2505. if (new_head == cache.size) new_head = i;
  2506. } else {
  2507. cache.cells[i].seq_id.clear();
  2508. cache.cells[i].seq_id.insert(seq_id);
  2509. }
  2510. }
  2511. // If we freed up a slot, set head to it so searching can start there.
  2512. if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
  2513. }
  2514. static void llama_kv_cache_seq_add(
  2515. struct llama_kv_cache & cache,
  2516. llama_seq_id seq_id,
  2517. llama_pos p0,
  2518. llama_pos p1,
  2519. llama_pos delta) {
  2520. uint32_t new_head = cache.size;
  2521. if (p0 < 0) p0 = 0;
  2522. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  2523. if (cache.recurrent) {
  2524. // for Mamba-like models, only the pos needs to be shifted
  2525. if (0 <= seq_id && seq_id < (int64_t) cache.size) {
  2526. llama_kv_cell & cell = cache.cells[seq_id];
  2527. if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
  2528. cell.pos += delta;
  2529. }
  2530. }
  2531. return;
  2532. }
  2533. for (uint32_t i = 0; i < cache.size; ++i) {
  2534. if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  2535. cache.has_shift = true;
  2536. cache.cells[i].pos += delta;
  2537. cache.cells[i].delta += delta;
  2538. if (cache.cells[i].pos < 0) {
  2539. if (!cache.cells[i].is_empty()) {
  2540. cache.used--;
  2541. }
  2542. cache.cells[i].pos = -1;
  2543. cache.cells[i].seq_id.clear();
  2544. if (new_head == cache.size) {
  2545. new_head = i;
  2546. }
  2547. }
  2548. }
  2549. }
  2550. // If we freed up a slot, set head to it so searching can start there.
  2551. // Otherwise we just start the next search from the beginning.
  2552. cache.head = new_head != cache.size ? new_head : 0;
  2553. }
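// NOTE: a hedged usage sketch (not from this file): context shifting typically drops the
// oldest part of a sequence and shifts the remaining positions back, roughly:
//
//     llama_kv_cache_seq_rm (cache, seq_id, n_keep,             n_keep + n_discard);
//     llama_kv_cache_seq_add(cache, seq_id, n_keep + n_discard, -1, -n_discard);
//
// n_keep/n_discard are illustrative parameters; the shift itself is applied lazily through
// cache.has_shift and cells[i].delta when the next graph is built.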
  2554. static void llama_kv_cache_seq_div(
  2555. struct llama_kv_cache & cache,
  2556. llama_seq_id seq_id,
  2557. llama_pos p0,
  2558. llama_pos p1,
  2559. int d) {
  2560. if (p0 < 0) p0 = 0;
  2561. if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
  2562. if (cache.recurrent) {
  2563. // for Mamba-like models, only the pos needs to be changed
  2564. if (0 <= seq_id && seq_id < (int64_t) cache.size) {
  2565. llama_kv_cell & cell = cache.cells[seq_id];
  2566. if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
  2567. cell.pos /= d;
  2568. }
  2569. }
  2570. return;
  2571. }
  2572. for (uint32_t i = 0; i < cache.size; ++i) {
  2573. if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
  2574. cache.has_shift = true;
  2575. {
  2576. llama_pos p_old = cache.cells[i].pos;
  2577. cache.cells[i].pos /= d;
  2578. cache.cells[i].delta += cache.cells[i].pos - p_old;
  2579. }
  2580. }
  2581. }
  2582. }
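// NOTE: dividing positions is the primitive used by grouped-attention / "self-extend" style
// context extension; e.g. llama_kv_cache_seq_div(cache, seq_id, p0, p1, 4) maps positions in
// [p0, p1) onto pos/4, again recorded lazily via cells[i].delta.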
  2583. static llama_pos llama_kv_cache_seq_pos_max(struct llama_kv_cache & cache, llama_seq_id seq_id) {
  2584. llama_pos result = 0;
  2585. for (uint32_t i = 0; i < cache.size; ++i) {
  2586. if (cache.cells[i].has_seq_id(seq_id)) {
  2587. result = std::max(result, cache.cells[i].pos);
  2588. }
  2589. }
  2590. return result;
  2591. }
  2592. static void llama_kv_cache_defrag(struct llama_kv_cache & cache) {
  2593. cache.do_defrag = true;
  2594. }
  2595. static uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) {
  2596. // the FA kernels require padding to avoid extra runtime boundary checks
  2597. return cparams.flash_attn ? 256u : 32u;
  2598. }
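// NOTE: a minimal sketch of how this padding is intended to be applied (assuming the usual
// GGML_PAD round-up helper; n_ctx is illustrative):
//
//     const uint32_t kv_size = GGML_PAD(n_ctx, llama_kv_cache_get_padding(cparams));
//
// i.e. the KV cache size is rounded up to a multiple of 256 with flash attention enabled,
// or 32 otherwise.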
  2599. //
  2600. // model loading and saving
  2601. //
  2602. enum llama_fver {
  2603. GGUF_FILE_VERSION_V1 = 1,
  2604. GGUF_FILE_VERSION_V2 = 2,
  2605. GGUF_FILE_VERSION_V3 = 3,
  2606. };
  2607. static const char * llama_file_version_name(llama_fver version) {
  2608. switch (version) {
  2609. case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
  2610. case GGUF_FILE_VERSION_V2: return "GGUF V2";
  2611. case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
  2612. }
  2613. return "unknown";
  2614. }
  2615. static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
  2616. char buf[256];
  2617. snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
  2618. for (size_t i = 1; i < ne.size(); i++) {
  2619. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
  2620. }
  2621. return buf;
  2622. }
  2623. static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
  2624. char buf[256];
  2625. snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
  2626. for (int i = 1; i < GGML_MAX_DIMS; i++) {
  2627. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
  2628. }
  2629. return buf;
  2630. }
  2631. namespace GGUFMeta {
  2632. template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
  2633. struct GKV_Base_Type {
  2634. static constexpr gguf_type gt = gt_;
  2635. static T getter(const gguf_context * ctx, const int kid) {
  2636. return gfun(ctx, kid);
  2637. }
  2638. };
  2639. template<typename T> struct GKV_Base;
  2640. template<> struct GKV_Base<bool >: GKV_Base_Type<bool, GGUF_TYPE_BOOL, gguf_get_val_bool> {};
  2641. template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, GGUF_TYPE_UINT8, gguf_get_val_u8 > {};
  2642. template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, GGUF_TYPE_UINT16, gguf_get_val_u16 > {};
  2643. template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, GGUF_TYPE_UINT32, gguf_get_val_u32 > {};
  2644. template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, GGUF_TYPE_UINT64, gguf_get_val_u64 > {};
  2645. template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, GGUF_TYPE_INT8, gguf_get_val_i8 > {};
  2646. template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, GGUF_TYPE_INT16, gguf_get_val_i16 > {};
  2647. template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, GGUF_TYPE_INT32, gguf_get_val_i32 > {};
  2648. template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, GGUF_TYPE_INT64, gguf_get_val_i64 > {};
  2649. template<> struct GKV_Base<float >: GKV_Base_Type<float, GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
  2650. template<> struct GKV_Base<double >: GKV_Base_Type<double, GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
  2651. template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING, gguf_get_val_str > {};
  2652. template<> struct GKV_Base<std::string> {
  2653. static constexpr gguf_type gt = GGUF_TYPE_STRING;
  2654. static std::string getter(const gguf_context * ctx, const int kid) {
  2655. return gguf_get_val_str(ctx, kid);
  2656. }
  2657. };
  2658. struct ArrayInfo {
  2659. const gguf_type gt;
  2660. const size_t length;
  2661. const void * data;
  2662. };
  2663. template<> struct GKV_Base<ArrayInfo> {
  2664. public:
  2665. static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
  2666. static ArrayInfo getter(const gguf_context *ctx, const int k) {
  2667. return ArrayInfo {
  2668. gguf_get_arr_type(ctx, k),
  2669. size_t(gguf_get_arr_n(ctx, k)),
  2670. gguf_get_arr_data(ctx, k),
  2671. };
  2672. }
  2673. };
  2674. template<typename T>
  2675. class GKV : public GKV_Base<T> {
  2676. GKV() = delete;
  2677. public:
  2678. static T get_kv(const gguf_context * ctx, const int k) {
  2679. const enum gguf_type kt = gguf_get_kv_type(ctx, k);
  2680. if (kt != GKV::gt) {
  2681. throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
  2682. gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
  2683. }
  2684. return GKV::getter(ctx, k);
  2685. }
  2686. static const char * override_type_to_str(const llama_model_kv_override_type ty) {
  2687. switch (ty) {
  2688. case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool";
  2689. case LLAMA_KV_OVERRIDE_TYPE_INT: return "int";
  2690. case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
  2691. case LLAMA_KV_OVERRIDE_TYPE_STR: return "str";
  2692. }
  2693. return "unknown";
  2694. }
  2695. static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) {
  2696. if (!ovrd) { return false; }
  2697. if (ovrd->tag == expected_type) {
  2698. LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
  2699. __func__, override_type_to_str(ovrd->tag), ovrd->key);
  2700. switch (ovrd->tag) {
  2701. case LLAMA_KV_OVERRIDE_TYPE_BOOL: {
  2702. LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
  2703. } break;
  2704. case LLAMA_KV_OVERRIDE_TYPE_INT: {
  2705. LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
  2706. } break;
  2707. case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
  2708. LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
  2709. } break;
  2710. case LLAMA_KV_OVERRIDE_TYPE_STR: {
  2711. LLAMA_LOG_INFO("%s\n", ovrd->val_str);
  2712. } break;
  2713. default:
  2714. // Shouldn't be possible to end up here, but just in case...
  2715. throw std::runtime_error(
  2716. format("Unsupported attempt to override %s type for metadata key %s\n",
  2717. override_type_to_str(ovrd->tag), ovrd->key));
  2718. }
  2719. return true;
  2720. }
  2721. LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
  2722. __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag));
  2723. return false;
  2724. }
  2725. template<typename OT>
  2726. static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
  2727. try_override(OT & target, const struct llama_model_kv_override * ovrd) {
  2728. if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
  2729. target = ovrd->val_bool;
  2730. return true;
  2731. }
  2732. return false;
  2733. }
  2734. template<typename OT>
  2735. static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
  2736. try_override(OT & target, const struct llama_model_kv_override * ovrd) {
  2737. if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
  2738. target = ovrd->val_i64;
  2739. return true;
  2740. }
  2741. return false;
  2742. }
  2743. template<typename OT>
  2744. static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
2745. try_override(OT & target, const struct llama_model_kv_override * ovrd) {
  2746. if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
  2747. target = ovrd->val_f64;
  2748. return true;
  2749. }
  2750. return false;
  2751. }
  2752. template<typename OT>
  2753. static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
2754. try_override(OT & target, const struct llama_model_kv_override * ovrd) {
  2755. if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
  2756. target = ovrd->val_str;
  2757. return true;
  2758. }
  2759. return false;
  2760. }
  2761. static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
  2762. if (try_override<T>(target, ovrd)) {
  2763. return true;
  2764. }
  2765. if (k < 0) { return false; }
  2766. target = get_kv(ctx, k);
  2767. return true;
  2768. }
  2769. static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
  2770. return set(ctx, gguf_find_key(ctx, key), target, ovrd);
  2771. }
  2772. static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
  2773. return set(ctx, key.c_str(), target, ovrd);
  2774. }
  2775. };
  2776. }
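// NOTE: a minimal usage sketch for the GKV helpers above (the key and variable names are
// illustrative):
//
//     uint32_t n_ctx_train = 0;
//     if (GGUFMeta::GKV<uint32_t>::set(meta, "llama.context_length", n_ctx_train)) {
//         // the key was found (or overridden) and had the expected GGUF type
//     }
//
// a GGUF value with a mismatched type makes get_kv() throw instead of silently converting.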
  2777. using llama_buf_map = std::unordered_map<uint32_t, ggml_backend_buffer_t>;
  2778. struct llama_model_loader {
  2779. int n_kv = 0;
  2780. int n_tensors = 0;
  2781. int n_created = 0;
  2782. int64_t n_elements = 0;
  2783. size_t n_bytes = 0;
  2784. bool use_mmap = false;
  2785. bool check_tensors;
  2786. llama_files files;
  2787. llama_ftype ftype;
  2788. llama_fver fver;
  2789. llama_mmaps mappings;
  2790. // Holds information on a model weight
  2791. struct llama_tensor_weight {
  2792. uint16_t idx; // source file index
  2793. size_t offs; // tensor data offset in the original file
  2794. ggml_tensor * tensor;
  2795. llama_tensor_weight(const llama_file * file, uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
  2796. const int tensor_idx = gguf_find_tensor(gguf_ctx, name);
  2797. offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);
  2798. if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
  2799. throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", name));
  2800. }
  2801. }
  2802. };
  2803. std::vector<llama_tensor_weight> weights;
  2804. std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
  2805. struct gguf_context * meta = NULL;
  2806. std::vector<ggml_context *> contexts;
  2807. std::string arch_name;
  2808. LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
  2809. llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
  2810. int trace = 0;
  2811. if (getenv("LLAMA_TRACE")) {
  2812. trace = atoi(getenv("LLAMA_TRACE"));
  2813. }
  2814. if (param_overrides_p != nullptr) {
  2815. for (const struct llama_model_kv_override *p = param_overrides_p; p->key[0] != 0; p++) {
  2816. kv_overrides.insert({std::string(p->key), *p});
  2817. }
  2818. }
  2819. struct ggml_context * ctx = NULL;
  2820. struct gguf_init_params params = {
  2821. /*.no_alloc = */ true,
  2822. /*.ctx = */ &ctx,
  2823. };
  2824. meta = gguf_init_from_file(fname.c_str(), params);
  2825. if (!meta) {
  2826. throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
  2827. }
  2828. get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
  2829. llm_kv = LLM_KV(llm_arch_from_string(arch_name));
  2830. files.emplace_back(new llama_file(fname.c_str(), "rb"));
  2831. contexts.emplace_back(ctx);
2832. // Save the tensor data offsets of the main file.
  2833. // For subsidiary files, `meta` tensor data offset must not be used,
  2834. // so we build a unified tensors index for weights.
  2835. for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
  2836. weights.emplace_back(files.back().get(), 0, cur->name, meta, cur);
  2837. }
  2838. uint16_t n_split = 0;
  2839. get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
  2840. // Load additional GGML contexts
  2841. if (n_split > 1) {
  2842. uint16_t idx = 0;
  2843. get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
  2844. if (idx != 0) {
  2845. throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
  2846. }
  2847. char split_prefix[PATH_MAX] = {0};
  2848. if (!llama_split_prefix(split_prefix, sizeof(split_prefix), fname.c_str(), idx, n_split)) {
  2849. throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
  2850. }
  2851. if (trace > 0) {
  2852. LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
  2853. }
  2854. char split_path[PATH_MAX] = {0};
  2855. for (idx = 1; idx < n_split; idx++) {
  2856. llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split);
  2857. struct gguf_init_params split_params = {
  2858. /*.no_alloc = */ true,
  2859. /*.ctx = */ &ctx,
  2860. };
  2861. struct gguf_context * ctx_gguf = gguf_init_from_file(split_path, split_params);
  2862. if (!ctx_gguf) {
  2863. throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path));
  2864. }
  2865. files.emplace_back(new llama_file(split_path, "rb"));
  2866. contexts.emplace_back(ctx);
2867. // Save the tensor data offset info for this shard.
  2868. for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
  2869. weights.emplace_back(files.back().get(), idx, cur->name, ctx_gguf, cur);
  2870. }
  2871. gguf_free(ctx_gguf);
  2872. }
  2873. get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
  2874. // sanity check
  2875. {
  2876. const int n_tensors_loaded = (int) weights.size();
  2877. if (n_tensors != n_tensors_loaded) {
  2878. throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
  2879. }
  2880. }
2881. LLAMA_LOG_INFO("%s: loaded metadata from %d additional GGUF split(s)\n", __func__, n_split - 1);
  2882. }
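// NOTE: the split handling above assumes the usual GGUF shard naming convention,
// e.g. "model-00001-of-00003.gguf" for the first of three shards; only the first shard may
// be passed to the loader, the remaining ones are derived via llama_split_path().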
  2883. n_kv = gguf_get_n_kv(meta);
  2884. n_tensors = weights.size();
  2885. fver = (enum llama_fver) gguf_get_version(meta);
  2886. std::set<std::string> tensor_names;
  2887. for (auto & w : weights) {
  2888. n_elements += ggml_nelements(w.tensor);
  2889. n_bytes += ggml_nbytes(w.tensor);
2890. // make sure there are no duplicated tensor names
  2891. const std::string name(w.tensor->name);
  2892. auto found = tensor_names.find(name);
  2893. if (found != tensor_names.end()) {
  2894. throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", w.tensor->name));
  2895. }
  2896. tensor_names.insert(name);
  2897. }
  2898. LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
  2899. __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
  2900. // determine file type based on the number of tensors for each quantization and print meta data
  2901. // TODO: make optional
  2902. {
  2903. std::map<enum ggml_type, uint32_t> n_type;
  2904. uint32_t n_type_max = 0;
  2905. enum ggml_type type_max = GGML_TYPE_F32;
  2906. for (int i = 0; i < n_tensors; i++) {
  2907. const ggml_tensor * tensor = weights.at(i).tensor;
  2908. enum ggml_type type = tensor->type;
  2909. n_type[type]++;
  2910. if (n_type_max < n_type[type]) {
  2911. n_type_max = n_type[type];
  2912. type_max = type;
  2913. }
  2914. if (trace > 0) {
  2915. const uint16_t sid = weights.at(i).idx;
  2916. LLAMA_LOG_INFO("%s: - tensor %4d, split %2d: %32s %-8s [ %s ]\n", __func__, i, sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
  2917. }
  2918. }
  2919. switch (type_max) {
  2920. case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
  2921. case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
  2922. case GGML_TYPE_BF16: ftype = LLAMA_FTYPE_MOSTLY_BF16; break;
  2923. case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
  2924. case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
  2925. case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
  2926. case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
  2927. case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
  2928. case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
  2929. case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
  2930. case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
  2931. case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
  2932. case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
  2933. case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
  2934. case GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break;
  2935. case GGML_TYPE_IQ2_S: ftype = LLAMA_FTYPE_MOSTLY_IQ2_S; break;
  2936. case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
  2937. case GGML_TYPE_IQ1_S: ftype = LLAMA_FTYPE_MOSTLY_IQ1_S; break;
  2938. case GGML_TYPE_IQ1_M: ftype = LLAMA_FTYPE_MOSTLY_IQ1_M; break;
  2939. case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break;
  2940. case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break;
  2941. case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break;
  2942. default:
  2943. {
  2944. LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
  2945. ftype = LLAMA_FTYPE_ALL_F32;
  2946. } break;
  2947. }
  2948. // this is a way to mark that we have "guessed" the file type
  2949. ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
  2950. {
  2951. const int kid = gguf_find_key(meta, "general.file_type");
  2952. if (kid >= 0) {
  2953. ftype = (llama_ftype) gguf_get_val_u32(meta, kid);
  2954. }
  2955. }
  2956. LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
  2957. for (int i = 0; i < n_kv; i++) {
  2958. const char * name = gguf_get_key(meta, i);
  2959. const enum gguf_type type = gguf_get_kv_type(meta, i);
  2960. const std::string type_name =
  2961. type == GGUF_TYPE_ARRAY
  2962. ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta, i)), gguf_get_arr_n(meta, i))
  2963. : gguf_type_name(type);
  2964. std::string value = gguf_kv_to_str(meta, i);
  2965. const size_t MAX_VALUE_LEN = 40;
  2966. if (value.size() > MAX_VALUE_LEN) {
  2967. value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
  2968. }
  2969. replace_all(value, "\n", "\\n");
  2970. LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
  2971. }
  2972. // print type counts
  2973. for (auto & kv : n_type) {
  2974. if (kv.second == 0) {
  2975. continue;
  2976. }
  2977. LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
  2978. }
  2979. }
  2980. if (!llama_mmap::SUPPORTED) {
  2981. LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
  2982. use_mmap = false;
  2983. }
  2984. this->use_mmap = use_mmap;
  2985. this->check_tensors = check_tensors;
  2986. }
  2987. ~llama_model_loader() {
  2988. if (meta) {
  2989. gguf_free(meta);
  2990. }
  2991. for (auto * ctx : contexts) {
  2992. ggml_free(ctx);
  2993. }
  2994. }
  2995. template<typename T>
  2996. typename std::enable_if<std::is_integral<T>::value, bool>::type
  2997. get_arr_n(const std::string & key, T & result, const bool required = true) {
  2998. const int kid = gguf_find_key(meta, key.c_str());
  2999. if (kid < 0) {
  3000. if (required) {
  3001. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  3002. }
  3003. return false;
  3004. }
  3005. struct GGUFMeta::ArrayInfo arr_info =
  3006. GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta, kid);
  3007. result = arr_info.length;
  3008. return true;
  3009. }
  3010. template<typename T>
  3011. typename std::enable_if<std::is_integral<T>::value, bool>::type
  3012. get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
  3013. return get_arr_n(llm_kv(kid), result, required);
  3014. }
  3015. template<typename T>
  3016. bool get_arr(const std::string & key, std::vector<T> & result, const bool required = true) {
  3017. const int kid = gguf_find_key(meta, key.c_str());
  3018. if (kid < 0) {
  3019. if (required) {
  3020. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  3021. }
  3022. return false;
  3023. }
  3024. struct GGUFMeta::ArrayInfo arr_info =
  3025. GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta, kid);
  3026. if (arr_info.gt != GGUF_TYPE_FLOAT32 && arr_info.gt != GGUF_TYPE_INT32) {
  3027. throw std::runtime_error(format("%s is not a float32 or int32 array", key.c_str()));
  3028. }
  3029. // GGML_ASSERT(gguf_type_size(arr_info.gt) == sizeof(T));
  3030. GGML_ASSERT((arr_info.gt != GGUF_TYPE_FLOAT32 || std::is_same<T, float>::value));
  3031. GGML_ASSERT((arr_info.gt != GGUF_TYPE_INT32 || std::is_same<T, int>::value));
  3032. result.resize(arr_info.length);
  3033. result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
  3034. return true;
  3035. }
  3036. template<typename T>
  3037. bool get_arr(const enum llm_kv kid, T& result, const bool required = true) {
  3038. return get_arr(llm_kv(kid), result, required);
  3039. }
  3040. template<typename T>
  3041. bool get_key(const std::string & key, T & result, const bool required = true) {
  3042. auto it = kv_overrides.find(key);
  3043. const struct llama_model_kv_override * override =
  3044. it != kv_overrides.end() ? &it->second : nullptr;
  3045. const bool found = GGUFMeta::GKV<T>::set(meta, key, result, override);
  3046. if (required && !found) {
  3047. throw std::runtime_error(format("key not found in model: %s", key.c_str()));
  3048. }
  3049. return found;
  3050. }
  3051. template<typename T>
  3052. bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
  3053. return get_key(llm_kv(kid), result, required);
  3054. }
  3055. std::string get_arch_name() const {
  3056. return arch_name;
  3057. }
  3058. enum llm_arch get_arch() const {
  3059. return llm_kv.arch;
  3060. }
  3061. const char * get_tensor_name(int i) const {
  3062. return weights.at(i).tensor->name;
  3063. }
  3064. const llama_tensor_weight * get_weight(const char * name) const {
  3065. for (const auto & weight : weights) {
  3066. if (strcmp(name, weight.tensor->name) == 0) {
  3067. return &weight;
  3068. }
  3069. }
  3070. return nullptr;
  3071. }
  3072. const llama_tensor_weight * get_weight(int i) const {
  3073. return get_weight(get_tensor_name(i));
  3074. }
  3075. const llama_tensor_weight & require_weight(const char * name) const {
  3076. const llama_tensor_weight * weight = get_weight(name);
  3077. if (!weight) {
  3078. throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
  3079. }
  3080. return *weight;
  3081. }
  3082. struct ggml_tensor * get_tensor_meta(const char * name) const {
  3083. const auto * weight = get_weight(name);
  3084. if (!weight) {
  3085. return nullptr;
  3086. }
  3087. return weight->tensor;
  3088. }
  3089. struct ggml_tensor * require_tensor_meta(const char * name) const {
  3090. struct ggml_tensor * tensor = get_tensor_meta(name);
  3091. if (!tensor) {
  3092. throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
  3093. }
  3094. return tensor;
  3095. }
  3096. struct ggml_tensor * get_tensor_meta(int i) const {
  3097. return get_tensor_meta(get_tensor_name(i));
  3098. }
  3099. struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, const struct ggml_tensor * cur, bool duplicated) {
  3100. struct ggml_tensor * tensor = ggml_dup_tensor(ctx, cur);
  3101. ggml_set_name(tensor, ggml_get_name(cur));
  3102. if (duplicated) {
  3103. size_data += ggml_nbytes(cur);
  3104. } else {
  3105. n_created++;
  3106. }
  3107. return tensor;
  3108. }
  3109. const struct ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const {
  3110. const struct ggml_tensor * cur = get_tensor_meta(name.c_str());
  3111. if (cur == NULL) {
  3112. if (!required) {
  3113. return NULL;
  3114. }
  3115. throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
  3116. }
  3117. {
  3118. bool is_ok = true;
  3119. for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
  3120. if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) {
  3121. is_ok = false;
  3122. break;
  3123. }
  3124. }
  3125. if (!is_ok) {
  3126. throw std::runtime_error(
  3127. format("%s: tensor '%s' has wrong shape; expected %s, got %s",
  3128. __func__, name.c_str(),
  3129. llama_format_tensor_shape(ne).c_str(),
  3130. llama_format_tensor_shape(cur).c_str()));
  3131. }
  3132. }
  3133. return cur;
  3134. }
  3135. static const int TENSOR_NOT_REQUIRED = 1;
  3136. static const int TENSOR_DUPLICATED = 2;
  3137. struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags = 0) {
  3138. const struct ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
  3139. if (cur == NULL) {
  3140. return NULL;
  3141. }
  3142. return create_tensor_for(ctx, cur, flags & TENSOR_DUPLICATED);
  3143. }
  3144. struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::vector<int64_t> & ne, size_t offset, bool required = true) {
  3145. const struct ggml_tensor * cur = check_tensor_dims(name, ne, required);
  3146. if (cur == NULL) {
  3147. return NULL;
  3148. }
  3149. if (cur->type != base->type) {
  3150. throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), ggml_type_name(base->type), ggml_type_name(cur->type)));
  3151. }
  3152. std::array<int64_t, GGML_MAX_DIMS> dims;
  3153. for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
  3154. dims[i] = i < ne.size() ? ne[i] : 1;
  3155. }
  3156. struct ggml_tensor * tensor = ggml_view_4d(ctx, base,
  3157. dims[0], dims[1], dims[2], dims[3],
  3158. cur->nb[1], cur->nb[2], cur->nb[3],
  3159. offset);
  3160. ggml_set_name(tensor, name.c_str());
  3161. n_created++;
  3162. return tensor;
  3163. }
  3164. void done_getting_tensors() const {
  3165. if (n_created != n_tensors) {
  3166. throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
  3167. }
  3168. }
  3169. void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr) {
  3170. if (use_mmap) {
  3171. mappings.reserve(files.size());
  3172. mmaps_used.reserve(files.size());
  3173. for (const auto & file : files) {
  3174. std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, ggml_is_numa()));
  3175. mmaps_used.emplace_back(mapping->size, 0);
  3176. if (mlock_mmaps) {
  3177. std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
  3178. mlock_mmap->init(mapping->addr);
  3179. mlock_mmaps->emplace_back(std::move(mlock_mmap));
  3180. }
  3181. mappings.emplace_back(std::move(mapping));
  3182. }
  3183. }
  3184. // compute the total size of all tensors for progress reporting
  3185. for (auto & w : weights) {
  3186. size_data += ggml_nbytes(w.tensor);
  3187. }
  3188. }
  3189. void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const {
  3190. GGML_ASSERT(!mappings.empty());
  3191. const auto & mapping = mappings.at(idx);
  3192. *first = mapping->size;
  3193. *last = 0;
  3194. *addr = mapping->addr;
  3195. for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) {
  3196. try {
  3197. const auto * weight = get_weight(ggml_get_name(tensor));
  3198. if (!weight) {
  3199. continue;
  3200. }
  3201. if (weight->idx != idx) {
  3202. continue;
  3203. }
  3204. *first = std::min(*first, weight->offs);
  3205. *last = std::max(*last, weight->offs + ggml_nbytes(tensor));
  3206. } catch(...) {
  3207. // the tensor is not in the model
  3208. }
  3209. }
  3210. }
  3211. // for backwards compatibility, does not support ggml-backend
  3212. void load_data_for(struct ggml_tensor * cur) const {
  3213. const auto & w = require_weight(ggml_get_name(cur));
  3214. if (use_mmap) {
  3215. const auto & mapping = mappings.at(w.idx);
  3216. if (cur->data == nullptr) {
  3217. cur->data = (uint8_t *)mapping->addr + w.offs;
  3218. } else {
  3219. memcpy(cur->data, (uint8_t *)mapping->addr + w.offs, ggml_nbytes(cur));
  3220. }
  3221. } else {
  3222. GGML_ASSERT(cur->data != nullptr);
  3223. GGML_ASSERT(w.idx < files.size());
  3224. const auto & file = files.at(w.idx);
  3225. file->seek(w.offs, SEEK_SET);
  3226. file->read_raw(cur->data, ggml_nbytes(cur));
  3227. }
  3228. if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) {
  3229. throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
  3230. }
  3231. }
  3232. size_t size_done = 0;
  3233. size_t size_data = 0;
  3234. std::vector<std::pair<size_t, size_t>> mmaps_used;
  3235. // Returns false if cancelled by progress_callback
  3236. bool load_all_data(
  3237. struct ggml_context * ctx,
  3238. llama_buf_map & bufs_mmap,
  3239. llama_mlocks * lmlocks,
  3240. llama_progress_callback progress_callback,
  3241. void * progress_callback_user_data) {
  3242. GGML_ASSERT(size_data != 0 && "call init_mappings() first");
  3243. std::vector<no_init<uint8_t>> read_buf;
  3244. std::vector<std::future<std::pair<ggml_tensor *, bool>>> validation_result;
  3245. for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
  3246. const auto * weight = get_weight(ggml_get_name(cur));
  3247. if (weight == nullptr) {
  3248. // this can happen with split experts models
  3249. continue;
  3250. }
  3251. if (progress_callback) {
  3252. if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
  3253. return false;
  3254. }
  3255. }
  3256. size_t n_size = ggml_nbytes(cur);
  3257. if (use_mmap) {
  3258. const auto & mapping = mappings.at(weight->idx);
  3259. ggml_backend_buffer_t buf_mmap = nullptr;
  3260. if (bufs_mmap.count(weight->idx)) {
  3261. buf_mmap = bufs_mmap.at(weight->idx);
  3262. }
  3263. uint8_t * data = (uint8_t *) mapping->addr + weight->offs;
  3264. if (check_tensors) {
  3265. validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
  3266. return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size));
  3267. }));
  3268. }
  3269. GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
  3270. if (buf_mmap && cur->data == nullptr) {
  3271. ggml_backend_tensor_alloc(buf_mmap, cur, data);
  3272. if (lmlocks) {
  3273. const auto & lmlock = lmlocks->at(weight->idx);
  3274. lmlock->grow_to(weight->offs + n_size);
  3275. }
  3276. auto & mmap_used = mmaps_used[weight->idx];
  3277. mmap_used.first = std::min(mmap_used.first, weight->offs);
  3278. mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
  3279. } else {
  3280. ggml_backend_tensor_set(cur, data, 0, n_size);
  3281. }
  3282. } else {
  3283. GGML_ASSERT(weight->idx < files.size());
  3284. const auto & file = files.at(weight->idx);
  3285. if (ggml_backend_buffer_is_host(cur->buffer)) {
  3286. file->seek(weight->offs, SEEK_SET);
  3287. file->read_raw(cur->data, n_size);
  3288. if (check_tensors) {
  3289. validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
  3290. return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size));
  3291. }));
  3292. }
  3293. } else {
  3294. read_buf.resize(n_size);
  3295. file->seek(weight->offs, SEEK_SET);
  3296. file->read_raw(read_buf.data(), n_size);
  3297. ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
  3298. if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
  3299. throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
  3300. }
  3301. }
  3302. }
  3303. size_done += n_size;
  3304. }
  3305. // check validation results
  3306. bool validation_failed = false;
  3307. for (auto & future : validation_result) {
  3308. auto result = future.get();
  3309. if (!result.second) {
  3310. LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first));
  3311. validation_failed = true;
  3312. }
  3313. }
  3314. if (validation_failed) {
  3315. throw std::runtime_error("found tensors with invalid data");
  3316. }
  3317. // check if this is the last call and do final cleanup
  3318. if (size_done >= size_data) {
  3319. // unmap offloaded tensors and metadata
  3320. if (use_mmap) {
  3321. for (uint32_t idx = 0; idx < mappings.size(); idx++) {
  3322. const auto & mmap_used = mmaps_used.at(idx);
  3323. auto & mapping = mappings.at(idx);
  3324. mapping->unmap_fragment(0, mmap_used.first);
  3325. if (mmap_used.second != 0) {
  3326. mapping->unmap_fragment(mmap_used.second, mapping->size);
  3327. }
  3328. }
  3329. }
  3330. if (progress_callback) {
  3331. // Even though the model is done loading, we still honor
  3332. // cancellation since we need to free allocations.
  3333. return progress_callback(1.0f, progress_callback_user_data);
  3334. }
  3335. }
  3336. return true;
  3337. }
  3338. };
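// NOTE: a minimal sketch of a progress callback as consumed by load_all_data() above. The
// function name is illustrative and the signature is assumed to match the
// llama_progress_callback typedef; returning false at any point cancels loading.
static bool example_progress_cb(float progress, void * user_data) {
    (void) user_data;
    fprintf(stderr, "\rload: %3.0f%%", progress * 100.0f);
    return true; // return false to cancel loading
}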
  3339. template<>
  3340. bool llama_model_loader::get_key(const enum llm_kv kid, enum llama_pooling_type & result, const bool required) {
  3341. uint32_t tmp;
  3342. const bool found = get_key(kid, tmp, required);
  3343. if (found) {
  3344. result = (enum llama_pooling_type) tmp;
  3345. } else {
  3346. result = LLAMA_POOLING_TYPE_UNSPECIFIED;
  3347. }
  3348. return found;
  3349. }
  3350. //
  3351. // load LLaMA models
  3352. //
  3353. static const char * llama_model_arch_name(llm_arch arch) {
  3354. auto it = LLM_ARCH_NAMES.find(arch);
  3355. if (it == LLM_ARCH_NAMES.end()) {
  3356. return "unknown";
  3357. }
  3358. return it->second;
  3359. }
  3360. static std::string llama_model_ftype_name(llama_ftype ftype) {
  3361. if (ftype & LLAMA_FTYPE_GUESSED) {
  3362. return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
  3363. }
  3364. switch (ftype) {
  3365. case LLAMA_FTYPE_ALL_F32: return "all F32";
  3366. case LLAMA_FTYPE_MOSTLY_F16: return "F16";
  3367. case LLAMA_FTYPE_MOSTLY_BF16: return "BF16";
  3368. case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
  3369. case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
  3370. case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
  3371. return "Q4_1, some F16";
  3372. case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
  3373. case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
  3374. case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
  3375. // K-quants
  3376. case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium";
  3377. case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small";
  3378. case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
  3379. case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
  3380. case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
  3381. case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
  3382. case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
  3383. case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
  3384. case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
  3385. case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
3386. case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
  3387. case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw";
  3388. case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw";
  3389. case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw";
  3390. case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw";
3391. case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw";
3392. case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw";
3393. case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw";
  3394. case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw";
  3395. case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw";
  3396. case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw";
  3397. case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw";
  3398. default: return "unknown, may not work";
  3399. }
  3400. }
  3401. static const char * llama_model_type_name(e_model type) {
  3402. switch (type) {
  3403. case MODEL_14M: return "14M";
  3404. case MODEL_17M: return "17M";
  3405. case MODEL_22M: return "22M";
  3406. case MODEL_33M: return "33M";
  3407. case MODEL_70M: return "70M";
  3408. case MODEL_109M: return "109M";
  3409. case MODEL_137M: return "137M";
  3410. case MODEL_160M: return "160M";
  3411. case MODEL_335M: return "335M";
  3412. case MODEL_410M: return "410M";
  3413. case MODEL_0_5B: return "0.5B";
  3414. case MODEL_1B: return "1B";
  3415. case MODEL_1_4B: return "1.4B";
  3416. case MODEL_2B: return "2B";
  3417. case MODEL_2_8B: return "2.8B";
  3418. case MODEL_3B: return "3B";
  3419. case MODEL_4B: return "4B";
  3420. case MODEL_6_9B: return "6.9B";
  3421. case MODEL_7B: return "7B";
  3422. case MODEL_8B: return "8B";
  3423. case MODEL_12B: return "12B";
  3424. case MODEL_13B: return "13B";
  3425. case MODEL_14B: return "14B";
  3426. case MODEL_15B: return "15B";
  3427. case MODEL_16B: return "16B";
  3428. case MODEL_20B: return "20B";
  3429. case MODEL_30B: return "30B";
  3430. case MODEL_34B: return "34B";
  3431. case MODEL_35B: return "35B";
  3432. case MODEL_40B: return "40B";
  3433. case MODEL_65B: return "65B";
  3434. case MODEL_70B: return "70B";
  3435. case MODEL_236B: return "236B";
  3436. case MODEL_314B: return "314B";
  3437. case MODEL_SMALL: return "0.1B";
  3438. case MODEL_MEDIUM: return "0.4B";
  3439. case MODEL_LARGE: return "0.8B";
  3440. case MODEL_XL: return "1.5B";
  3441. case MODEL_A2_7B: return "A2.7B";
  3442. case MODEL_8x7B: return "8x7B";
  3443. case MODEL_8x22B: return "8x22B";
  3444. case MODEL_16x12B: return "16x12B";
  3445. case MODEL_10B_128x3_66B: return "10B+128x3.66B";
  3446. default: return "?B";
  3447. }
  3448. }
  3449. static const char * llama_model_vocab_type_name(enum llama_vocab_type type){
  3450. switch (type) {
  3451. case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
  3452. case LLAMA_VOCAB_TYPE_SPM: return "SPM";
  3453. case LLAMA_VOCAB_TYPE_BPE: return "BPE";
  3454. case LLAMA_VOCAB_TYPE_WPM: return "WPM";
  3455. default: return "unknown";
  3456. }
  3457. }
  3458. static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
  3459. model.arch = ml.get_arch();
  3460. if (model.arch == LLM_ARCH_UNKNOWN) {
  3461. throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
  3462. }
  3463. }
  3464. static void llm_load_hparams(
  3465. llama_model_loader & ml,
  3466. llama_model & model) {
  3467. auto & hparams = model.hparams;
  3468. const gguf_context * ctx = ml.meta;
  3469. // get metadata as string
  3470. for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
  3471. enum gguf_type type = gguf_get_kv_type(ctx, i);
  3472. if (type == GGUF_TYPE_ARRAY) {
  3473. continue;
  3474. }
  3475. const char * name = gguf_get_key(ctx, i);
  3476. const std::string value = gguf_kv_to_str(ctx, i);
  3477. model.gguf_kv.emplace(name, value);
  3478. }
  3479. // get general kv
  3480. ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
  3481. // get hparams kv
  3482. ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
  3483. // everything past this point is not vocab-related
  3484. if (hparams.vocab_only) {
  3485. return;
  3486. }
  3487. ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
  3488. ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
  3489. ml.get_key(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff);
  3490. ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head);
  3491. ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer);
  3492. ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
  3493. ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
  3494. GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
  3495. GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
  3496. if (hparams.n_expert > 0) {
  3497. GGML_ASSERT(hparams.n_expert_used > 0);
  3498. } else {
  3499. GGML_ASSERT(hparams.n_expert_used == 0);
  3500. }
  3501. // n_head_kv is optional, default to n_head
  3502. hparams.n_head_kv = hparams.n_head;
  3503. ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv, false);
  3504. bool rope_finetuned = false;
  3505. ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
  3506. hparams.rope_finetuned = rope_finetuned;
  3507. hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
  3508. ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
  3509. // rope_freq_base (optional)
  3510. hparams.rope_freq_base_train = 10000.0f;
  3511. ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
  3512. std::string rope_scaling("linear");
  3513. ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
  3514. hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
  3515. GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
  3516. // rope_freq_scale (inverse of the kv) is optional
  3517. float ropescale = 0.0f;
  3518. if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
  3519. // try the old key name
  3520. ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
  3521. }
  3522. hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
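// NOTE: worked example: a model fine-tuned with 4x linear RoPE scaling stores a scaling
// factor of 4.0 under LLM_KV_ROPE_SCALING_FACTOR, which becomes
// rope_freq_scale_train = 1.0f/4.0f = 0.25f here.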
  3523. ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
  3524. // sanity check for n_rot (optional)
  3525. {
  3526. hparams.n_rot = (hparams.n_head == 0) ? 0 : hparams.n_embd / hparams.n_head;
  3527. ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
  3528. if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
  3529. if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
  3530. throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
  3531. }
  3532. }
  3533. // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
  3534. // gpt-j n_rot = rotary_dim
  3535. }
  3536. hparams.n_embd_head_k = (hparams.n_head == 0) ? 0 : hparams.n_embd / hparams.n_head;
  3537. ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
  3538. hparams.n_embd_head_v = (hparams.n_head == 0) ? 0 : hparams.n_embd / hparams.n_head;
  3539. ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
  3540. // arch-specific KVs
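// each architecture reads its own normalization epsilon and any extra KVs, then
// infers model.type from heuristics on n_layer (and sometimes n_embd, n_ff,
// n_head or n_vocab), used e.g. when printing the model metadata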
  3541. switch (model.arch) {
  3542. case LLM_ARCH_LLAMA:
  3543. {
  3544. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3545. if (hparams.n_expert == 8) {
  3546. switch (hparams.n_layer) {
  3547. case 32: model.type = e_model::MODEL_8x7B; break;
  3548. case 56: model.type = e_model::MODEL_8x22B; break;
  3549. default: model.type = e_model::MODEL_UNKNOWN;
  3550. }
  3551. } else {
  3552. switch (hparams.n_layer) {
  3553. case 22: model.type = e_model::MODEL_1B; break;
  3554. case 26: model.type = e_model::MODEL_3B; break;
  3555. // granite uses a vocab with len 49152
  3556. case 32: model.type = hparams.n_vocab == 49152 ? e_model::MODEL_3B : (hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B); break;
  3557. case 36: model.type = e_model::MODEL_8B; break; // granite
  3558. case 40: model.type = e_model::MODEL_13B; break;
  3559. case 48: model.type = e_model::MODEL_34B; break;
  3560. case 60: model.type = e_model::MODEL_30B; break;
  3561. case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
  3562. default: model.type = e_model::MODEL_UNKNOWN;
  3563. }
  3564. }
  3565. } break;
  3566. case LLM_ARCH_MINICPM:
  3567. {
  3568. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3569. switch (hparams.n_layer) {
  3570. case 40: model.type = e_model::MODEL_2B; break;
  3571. default: model.type = e_model::MODEL_UNKNOWN;
  3572. }
  3573. } break;
  3574. case LLM_ARCH_GROK:
  3575. {
  3576. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3577. switch (hparams.n_layer) {
  3578. case 64: model.type = e_model::MODEL_314B; break;
  3579. default: model.type = e_model::MODEL_UNKNOWN;
  3580. }
  3581. } break;
  3582. case LLM_ARCH_FALCON:
  3583. {
  3584. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3585. switch (hparams.n_layer) {
  3586. case 32: model.type = e_model::MODEL_7B; break;
  3587. case 60: model.type = e_model::MODEL_40B; break;
  3588. default: model.type = e_model::MODEL_UNKNOWN;
  3589. }
  3590. } break;
  3591. case LLM_ARCH_BAICHUAN:
  3592. {
  3593. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3594. switch (hparams.n_layer) {
  3595. case 32: model.type = e_model::MODEL_7B; break;
  3596. case 40: model.type = e_model::MODEL_13B; break;
  3597. default: model.type = e_model::MODEL_UNKNOWN;
  3598. }
  3599. if (model.type == e_model::MODEL_13B) {
  3600. // TODO: become GGUF KV parameter
  3601. hparams.f_max_alibi_bias = 8.0f;
  3602. }
  3603. } break;
  3604. case LLM_ARCH_STARCODER:
  3605. {
  3606. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3607. switch (hparams.n_layer) {
  3608. case 24: model.type = e_model::MODEL_1B; break;
  3609. case 36: model.type = e_model::MODEL_3B; break;
  3610. case 42: model.type = e_model::MODEL_7B; break;
  3611. case 40: model.type = e_model::MODEL_15B; break;
  3612. default: model.type = e_model::MODEL_UNKNOWN;
  3613. }
  3614. } break;
  3615. case LLM_ARCH_REFACT:
  3616. {
  3617. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3618. switch (hparams.n_layer) {
  3619. case 32: model.type = e_model::MODEL_1B; break;
  3620. default: model.type = e_model::MODEL_UNKNOWN;
  3621. }
  3622. // TODO: become GGUF KV parameter
  3623. hparams.f_max_alibi_bias = 8.0f;
  3624. } break;
  3625. case LLM_ARCH_BERT:
  3626. {
  3627. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3628. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  3629. ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
  3630. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
  3631. switch (hparams.n_layer) {
  3632. case 3:
  3633. model.type = e_model::MODEL_17M; break; // bge-micro
  3634. case 6:
  3635. model.type = e_model::MODEL_22M; break; // MiniLM-L6
  3636. case 12:
  3637. switch (hparams.n_embd) {
  3638. case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small
  3639. case 768: model.type = e_model::MODEL_109M; break; // bge-base
  3640. } break;
  3641. case 24:
  3642. model.type = e_model::MODEL_335M; break; // bge-large
  3643. }
  3644. } break;
  3645. case LLM_ARCH_JINA_BERT_V2:
  3646. {
  3647. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3648. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  3649. ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
  3650. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
  3651. hparams.f_max_alibi_bias = 8.0f;
  3652. switch (hparams.n_layer) {
  3653. case 4: model.type = e_model::MODEL_33M; break; // jina-embeddings-small
  3654. case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base
  3655. }
  3656. } break;
  3657. case LLM_ARCH_NOMIC_BERT:
  3658. {
  3659. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3660. ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
  3661. ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
  3662. ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
  3663. if (hparams.n_layer == 12 && hparams.n_embd == 768) {
  3664. model.type = e_model::MODEL_137M;
  3665. }
  3666. } break;
  3667. case LLM_ARCH_BLOOM:
  3668. {
  3669. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3670. switch (hparams.n_layer) {
  3671. case 24: model.type = e_model::MODEL_1B; break;
  3672. case 30:
  3673. switch (hparams.n_embd) {
  3674. case 2560: model.type = e_model::MODEL_3B; break;
  3675. case 4096: model.type = e_model::MODEL_7B; break;
  3676. } break;
  3677. }
  3678. // TODO: become GGUF KV parameter
  3679. hparams.f_max_alibi_bias = 8.0f;
  3680. } break;
  3681. case LLM_ARCH_MPT:
  3682. {
  3683. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3684. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  3685. ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
  3686. switch (hparams.n_layer) {
  3687. case 32: model.type = e_model::MODEL_7B; break;
  3688. case 48: model.type = e_model::MODEL_30B; break;
  3689. default: model.type = e_model::MODEL_UNKNOWN;
  3690. }
  3691. } break;
  3692. case LLM_ARCH_STABLELM:
  3693. {
  3694. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3695. switch (hparams.n_layer) {
  3696. case 24: model.type = e_model::MODEL_1B; break;
  3697. case 32: model.type = e_model::MODEL_3B; break;
  3698. case 40: model.type = e_model::MODEL_12B; break;
  3699. default: model.type = e_model::MODEL_UNKNOWN;
  3700. }
  3701. } break;
  3702. case LLM_ARCH_QWEN:
  3703. {
  3704. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3705. switch (hparams.n_layer) {
  3706. case 32: model.type = e_model::MODEL_7B; break;
  3707. case 40: model.type = e_model::MODEL_13B; break;
  3708. default: model.type = e_model::MODEL_UNKNOWN;
  3709. }
  3710. } break;
  3711. case LLM_ARCH_QWEN2:
  3712. {
  3713. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3714. switch (hparams.n_layer) {
  3715. case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break;
  3716. case 32: model.type = e_model::MODEL_7B; break;
  3717. case 40: model.type = hparams.n_head == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break;
  3718. case 80: model.type = e_model::MODEL_70B; break;
  3719. default: model.type = e_model::MODEL_UNKNOWN;
  3720. }
  3721. } break;
  3722. case LLM_ARCH_QWEN2MOE:
  3723. {
  3724. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3725. switch (hparams.n_layer) {
  3726. case 24: model.type = e_model::MODEL_A2_7B; break;
  3727. default: model.type = e_model::MODEL_UNKNOWN;
  3728. }
  3729. } break;
  3730. case LLM_ARCH_PHI2:
  3731. {
  3732. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3733. switch (hparams.n_layer) {
  3734. case 24: model.type = e_model::MODEL_1B; break;
  3735. case 32: model.type = e_model::MODEL_3B; break;
  3736. default: model.type = e_model::MODEL_UNKNOWN;
  3737. }
  3738. } break;
  3739. case LLM_ARCH_PHI3:
  3740. {
  3741. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3742. switch (hparams.n_layer) {
  3743. case 24: model.type = e_model::MODEL_1B; break;
  3744. case 32: model.type = e_model::MODEL_3B; break;
  3745. case 40: model.type = e_model::MODEL_14B; break;
  3746. default: model.type = e_model::MODEL_UNKNOWN;
  3747. }
  3748. } break;
  3749. case LLM_ARCH_PLAMO:
  3750. {
  3751. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3752. switch (hparams.n_layer) {
  3753. case 40: model.type = e_model::MODEL_13B; break;
  3754. default: model.type = e_model::MODEL_UNKNOWN;
  3755. }
  3756. } break;
  3757. case LLM_ARCH_GPT2:
  3758. {
  3759. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3760. switch (hparams.n_layer) {
  3761. case 12: model.type = e_model::MODEL_SMALL; break;
  3762. case 24: model.type = e_model::MODEL_MEDIUM; break;
  3763. case 36: model.type = e_model::MODEL_LARGE; break;
  3764. case 48: model.type = e_model::MODEL_XL; break;
  3765. default: model.type = e_model::MODEL_UNKNOWN;
  3766. }
  3767. } break;
  3768. case LLM_ARCH_CODESHELL:
  3769. {
  3770. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3771. switch (hparams.n_layer) {
  3772. case 42: model.type = e_model::MODEL_SMALL; break;
  3773. default: model.type = e_model::MODEL_UNKNOWN;
  3774. }
  3775. } break;
  3776. case LLM_ARCH_ORION:
  3777. {
  3778. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3779. switch (hparams.n_layer) {
  3780. case 40: model.type = e_model::MODEL_14B; break;
  3781. default: model.type = e_model::MODEL_UNKNOWN;
  3782. }
  3783. } break;
  3784. case LLM_ARCH_INTERNLM2:
  3785. {
  3786. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3787. switch (hparams.n_layer) {
  3788. case 32: model.type = e_model::MODEL_7B; break;
  3789. case 48: model.type = e_model::MODEL_20B; break;
  3790. default: model.type = e_model::MODEL_UNKNOWN;
  3791. }
  3792. } break;
  3793. case LLM_ARCH_GEMMA:
  3794. {
  3795. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3796. switch (hparams.n_layer) {
  3797. case 18: model.type = e_model::MODEL_2B; break;
  3798. case 28: model.type = e_model::MODEL_7B; break;
  3799. default: model.type = e_model::MODEL_UNKNOWN;
  3800. }
  3801. } break;
  3802. case LLM_ARCH_STARCODER2:
  3803. {
  3804. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3805. switch (hparams.n_layer) {
  3806. case 30: model.type = e_model::MODEL_3B; break;
  3807. case 32: model.type = e_model::MODEL_7B; break;
  3808. case 40: model.type = e_model::MODEL_15B; break;
  3809. case 52: model.type = e_model::MODEL_20B; break; // granite
  3810. case 88: model.type = e_model::MODEL_34B; break; // granite
  3811. default: model.type = e_model::MODEL_UNKNOWN;
  3812. }
  3813. } break;
  3814. case LLM_ARCH_MAMBA:
  3815. {
  3816. ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
  3817. ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
  3818. ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
  3819. ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
  3820. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3821. switch (hparams.n_layer) {
  3822. case 24:
  3823. switch (hparams.n_embd) {
  3824. case 768: model.type = e_model::MODEL_SMALL; break;
  3825. default: model.type = e_model::MODEL_UNKNOWN;
  3826. } break;
  3827. case 48:
  3828. switch (hparams.n_embd) {
  3829. case 1024: model.type = e_model::MODEL_MEDIUM; break;
  3830. case 1536: model.type = e_model::MODEL_LARGE; break;
  3831. case 2048: model.type = e_model::MODEL_XL; break;
  3832. default: model.type = e_model::MODEL_UNKNOWN;
  3833. } break;
  3834. case 64:
  3835. switch (hparams.n_embd) {
  3836. case 2560: model.type = e_model::MODEL_3B; break;
  3837. default: model.type = e_model::MODEL_UNKNOWN;
  3838. } break;
  3839. default: model.type = e_model::MODEL_UNKNOWN;
  3840. }
  3841. } break;
  3842. case LLM_ARCH_XVERSE:
  3843. {
  3844. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3845. switch (hparams.n_layer) {
  3846. case 32: model.type = e_model::MODEL_7B; break;
  3847. case 40: model.type = e_model::MODEL_13B; break;
  3848. case 80: model.type = e_model::MODEL_65B; break;
  3849. default: model.type = e_model::MODEL_UNKNOWN;
  3850. }
  3851. } break;
  3852. case LLM_ARCH_COMMAND_R:
  3853. {
  3854. ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
  3855. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3856. switch (hparams.n_layer) {
  3857. case 40: model.type = e_model::MODEL_35B; break;
  3858. default: model.type = e_model::MODEL_UNKNOWN;
  3859. }
  3860. } break;
  3861. case LLM_ARCH_DBRX:
  3862. {
  3863. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3864. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
  3865. switch (hparams.n_layer) {
  3866. case 40: model.type = e_model::MODEL_16x12B; break;
  3867. default: model.type = e_model::MODEL_UNKNOWN;
  3868. }
  3869. } break;
  3870. case LLM_ARCH_OLMO:
  3871. {
  3872. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3873. ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
  3874. switch (hparams.n_layer) {
  3875. case 22: model.type = e_model::MODEL_1B; break;
  3876. case 32: model.type = e_model::MODEL_7B; break;
  3877. case 80: model.type = e_model::MODEL_70B; break;
  3878. default: model.type = e_model::MODEL_UNKNOWN;
  3879. }
  3880. } break;
  3881. case LLM_ARCH_GPTNEOX:
  3882. {
  3883. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
  3884. ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
  3885. switch (hparams.n_layer) {
  3886. case 6:
  3887. switch (hparams.n_ff) {
  3888. case 512: model.type = e_model::MODEL_14M; break;
  3889. case 2048: model.type = e_model::MODEL_70M; break;
  3890. default: model.type = e_model::MODEL_UNKNOWN;
  3891. } break;
  3892. case 12:
  3893. switch (hparams.n_ff) {
  3894. case 3072: model.type = e_model::MODEL_160M; break;
  3895. default: model.type = e_model::MODEL_UNKNOWN;
  3896. } break;
  3897. case 16:
  3898. switch (hparams.n_ff) {
  3899. case 8192: model.type = e_model::MODEL_1B; break;
  3900. default: model.type = e_model::MODEL_UNKNOWN;
  3901. } break;
  3902. case 24:
  3903. switch (hparams.n_ff) {
  3904. case 4096: model.type = e_model::MODEL_410M; break;
  3905. case 8192: model.type = e_model::MODEL_1_4B; break;
  3906. default: model.type = e_model::MODEL_UNKNOWN;
  3907. } break;
  3908. case 32:
  3909. switch (hparams.n_ff) {
  3910. case 10240: model.type = e_model::MODEL_2_8B; break;
  3911. case 16384: model.type = e_model::MODEL_6_9B; break;
  3912. default: model.type = e_model::MODEL_UNKNOWN;
  3913. } break;
  3914. case 36:
  3915. switch (hparams.n_ff) {
  3916. case 20480: model.type = e_model::MODEL_12B; break;
  3917. default: model.type = e_model::MODEL_UNKNOWN;
  3918. } break;
  3919. case 44:
  3920. switch (hparams.n_ff) {
  3921. case 24576: model.type = e_model::MODEL_20B; break;
  3922. default: model.type = e_model::MODEL_UNKNOWN;
  3923. } break;
  3924. default: model.type = e_model::MODEL_UNKNOWN;
  3925. }
  3926. } break;
  3927. case LLM_ARCH_ARCTIC:
  3928. {
  3929. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3930. if (hparams.n_expert == 128) {
  3931. switch (hparams.n_layer) {
  3932. case 35: model.type = e_model::MODEL_10B_128x3_66B; break;
  3933. default: model.type = e_model::MODEL_UNKNOWN;
  3934. }
  3935. } else {
  3936. model.type = e_model::MODEL_UNKNOWN;
  3937. }
  3938. } break;
  3939. case LLM_ARCH_DEEPSEEK2:
  3940. {
  3941. bool is_lite = (hparams.n_layer == 27);
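// the 27-layer variant is DeepSeek-V2-Lite, which does not use a low-rank
// projection for Q, so the Q LoRA rank is only read for the full-size model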
  3942. ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
  3943. ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
  3944. if (!is_lite) {
  3945. ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
  3946. }
  3947. ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
  3948. ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
  3949. ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
  3950. ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
  3951. ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
  3952. switch (hparams.n_layer) {
  3953. case 27: model.type = e_model::MODEL_16B; break;
  3954. case 60: model.type = e_model::MODEL_236B; break;
  3955. default: model.type = e_model::MODEL_UNKNOWN;
  3956. }
  3957. } break;
  3958. default: (void)0;
  3959. }
  3960. model.ftype = ml.ftype;
  3961. if (hparams.f_max_alibi_bias > 0.0f) {
  3962. hparams.use_alibi = true;
  3963. }
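// architectures that set a non-zero max ALiBi bias above (e.g. MPT, BLOOM,
// Refact, Baichuan-13B) use ALiBi attention biasing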
  3964. hparams.rope_type = llama_rope_type(&model);
  3965. }
  3966. // TODO: This should probably be in llama.h
  3967. static std::vector<llama_vocab::id> llama_tokenize_internal(
  3968. const llama_vocab & vocab, std::string raw_text, bool add_special, bool parse_special = false
  3969. );
  3970. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);
  3971. static void llm_load_vocab(
  3972. llama_model_loader & ml,
  3973. llama_model & model) {
  3974. auto & vocab = model.vocab;
  3975. struct gguf_context * ctx = ml.meta;
  3976. const auto kv = LLM_KV(model.arch);
  3977. // determine vocab type
  3978. {
  3979. std::string tokenizer_model;
  3980. std::string tokenizer_pre;
  3981. ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model);
  3982. ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
  3983. if (tokenizer_model == "no_vocab") {
  3984. vocab.type = LLAMA_VOCAB_TYPE_NONE;
  3985. // default special tokens
  3986. vocab.special_bos_id = -1;
  3987. vocab.special_eos_id = -1;
  3988. vocab.special_unk_id = -1;
  3989. vocab.special_sep_id = -1;
  3990. vocab.special_pad_id = -1;
  3991. vocab.special_cls_id = -1;
  3992. vocab.special_mask_id = -1;
  3993. vocab.linefeed_id = -1;
  3994. return;
  3995. } else if (tokenizer_model == "llama") {
  3996. vocab.type = LLAMA_VOCAB_TYPE_SPM;
  3997. // default special tokens
  3998. vocab.special_bos_id = 1;
  3999. vocab.special_eos_id = 2;
  4000. vocab.special_unk_id = 0;
  4001. vocab.special_sep_id = -1;
  4002. vocab.special_pad_id = -1;
  4003. vocab.special_cls_id = -1;
  4004. vocab.special_mask_id = -1;
4005. // For Fill-In-the-Middle (FIM)/infill models which were converted
  4006. // prior to support of FIM special tokens in GGUF, the following
  4007. // will allow those models to continue to work. The general names
  4008. // of the known models are currently CodeLlama (LLM_ARCH_LLAMA) and
  4009. // CodeGemma (LLM_ARCH_GEMMA). This can potentially be removed once
  4010. // new versions of these models have been published.
  4011. std::string gen_name;
  4012. ml.get_key(LLM_KV_GENERAL_NAME, gen_name, false);
  4013. std::transform(gen_name.begin(), gen_name.end(), gen_name.begin(),
  4014. [](unsigned char c){ return std::tolower(c); });
  4015. if (gen_name.find("code") != std::string::npos) {
  4016. if (model.arch == LLM_ARCH_LLAMA) {
  4017. vocab.special_prefix_id = 32007;
  4018. vocab.special_suffix_id = 32008;
  4019. vocab.special_middle_id = 32009;
  4020. vocab.special_eot_id = 32010;
  4021. } else if (model.arch == LLM_ARCH_GEMMA) {
  4022. vocab.special_prefix_id = 67;
  4023. vocab.special_suffix_id = 69;
  4024. vocab.special_middle_id = 68;
  4025. // TODO: this is not EOT, it is "file separator" token, needs fix
  4026. // https://huggingface.co/google/codegemma-7b-it/blob/9b1d9231388358c04d90bd003458f5070d97db44/tokenizer_config.json#L565-L572
  4027. //vocab.special_eot_id = 70;
  4028. vocab.special_eot_id = 107;
  4029. }
  4030. }
  4031. const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
  4032. if (add_space_prefix_keyidx != -1) {
  4033. vocab.add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
  4034. } // The default value of add_space_prefix is true.
  4035. } else if (tokenizer_model == "bert") {
  4036. vocab.type = LLAMA_VOCAB_TYPE_WPM;
  4037. // default special tokens
  4038. vocab.special_bos_id = -1;
  4039. vocab.special_eos_id = -1;
  4040. vocab.special_unk_id = 100;
  4041. vocab.special_sep_id = 102;
  4042. vocab.special_pad_id = 0;
  4043. vocab.special_cls_id = 101;
  4044. vocab.special_mask_id = 103;
  4045. vocab.add_space_prefix = false;
  4046. } else if (tokenizer_model == "gpt2") {
  4047. vocab.type = LLAMA_VOCAB_TYPE_BPE;
  4048. const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
  4049. if (add_space_prefix_keyidx != -1) {
  4050. vocab.add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
  4051. }
  4052. // read bpe merges and populate bpe ranks
  4053. const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
  4054. if (merges_keyidx == -1) {
  4055. throw std::runtime_error("cannot find tokenizer merges in model file\n");
  4056. }
  4057. const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
  4058. for (int i = 0; i < n_merges; i++) {
  4059. const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
  4060. GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
  4061. std::string first;
  4062. std::string second;
  4063. const size_t pos = word.find(' ', 1);
  4064. if (pos != std::string::npos) {
  4065. first = word.substr(0, pos);
  4066. second = word.substr(pos + 1);
  4067. }
  4068. vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
  4069. }
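// bpe_ranks maps a merge pair to its rank: lower rank means the pair is
// merged earlier during BPE tokenization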
  4070. // default special tokens
  4071. vocab.special_bos_id = 11;
  4072. vocab.special_eos_id = 11;
  4073. vocab.special_unk_id = -1;
  4074. vocab.special_sep_id = -1;
  4075. vocab.special_pad_id = -1;
  4076. vocab.special_cls_id = -1;
  4077. vocab.special_mask_id = -1;
  4078. } else {
  4079. throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str()));
  4080. }
  4081. // for now, only BPE models have pre-tokenizers
  4082. if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
  4083. if (tokenizer_pre.empty()) {
  4084. LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
  4085. LLAMA_LOG_WARN("%s: \n", __func__);
  4086. LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
  4087. LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__);
  4088. LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__);
  4089. LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
  4090. LLAMA_LOG_WARN("%s: \n", __func__);
  4091. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
  4092. } else if (tokenizer_pre == "default") {
  4093. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
  4094. } else if (
  4095. tokenizer_pre == "llama3" ||
  4096. tokenizer_pre == "llama-v3" ||
  4097. tokenizer_pre == "llama-bpe") {
  4098. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
  4099. } else if (
  4100. tokenizer_pre == "deepseek-llm") {
  4101. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM;
  4102. } else if (
  4103. tokenizer_pre == "deepseek-coder") {
  4104. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
  4105. } else if (
  4106. tokenizer_pre == "falcon") {
  4107. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
  4108. } else if (
  4109. tokenizer_pre == "mpt") {
  4110. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT;
  4111. } else if (
  4112. tokenizer_pre == "starcoder") {
  4113. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
  4114. } else if (
  4115. tokenizer_pre == "gpt-2" ||
  4116. tokenizer_pre == "jina-es" ||
  4117. tokenizer_pre == "jina-de" ||
  4118. tokenizer_pre == "jina-v2-es" ||
  4119. tokenizer_pre == "jina-v2-de" ||
  4120. tokenizer_pre == "jina-v2-code") {
  4121. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
  4122. } else if (
  4123. tokenizer_pre == "refact") {
  4124. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
  4125. } else if (
  4126. tokenizer_pre == "command-r") {
  4127. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
  4128. } else if (
  4129. tokenizer_pre == "qwen2") {
  4130. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
  4131. } else if (
  4132. tokenizer_pre == "stablelm2") {
  4133. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
  4134. } else if (
  4135. tokenizer_pre == "olmo") {
  4136. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
  4137. } else if (
  4138. tokenizer_pre == "dbrx") {
  4139. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
  4140. } else if (
  4141. tokenizer_pre == "smaug-bpe") {
  4142. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
  4143. } else {
  4144. throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
  4145. }
  4146. } else {
  4147. vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
  4148. }
  4149. }
  4150. const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
  4151. if (token_idx == -1) {
  4152. throw std::runtime_error("cannot find tokenizer vocab in model file\n");
  4153. }
  4154. const float * scores = nullptr;
  4155. const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
  4156. if (score_idx != -1) {
  4157. scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
  4158. }
  4159. const int * toktypes = nullptr;
  4160. const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
  4161. if (toktype_idx != -1) {
  4162. toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
  4163. }
  4164. const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
  4165. vocab.id_to_token.resize(n_vocab);
  4166. for (uint32_t i = 0; i < n_vocab; i++) {
  4167. std::string word = gguf_get_arr_str(ctx, token_idx, i);
  4168. GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
  4169. vocab.token_to_id[word] = i;
  4170. auto & token_data = vocab.id_to_token[i];
  4171. token_data.text = std::move(word);
  4172. token_data.score = scores ? scores[i] : 0.0f;
  4173. token_data.attr = LLAMA_TOKEN_ATTR_NORMAL;
  4174. if (toktypes) { //TODO: remove, required until per token attributes are available from GGUF file
  4175. switch(toktypes[i]) {
  4176. case LLAMA_TOKEN_TYPE_UNKNOWN: token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN; break;
  4177. case LLAMA_TOKEN_TYPE_UNUSED: token_data.attr = LLAMA_TOKEN_ATTR_UNUSED; break;
  4178. case LLAMA_TOKEN_TYPE_NORMAL: token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; break;
  4179. case LLAMA_TOKEN_TYPE_CONTROL: token_data.attr = LLAMA_TOKEN_ATTR_CONTROL; break;
  4180. case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break;
  4181. case LLAMA_TOKEN_TYPE_BYTE: token_data.attr = LLAMA_TOKEN_ATTR_BYTE; break;
  4182. case LLAMA_TOKEN_TYPE_UNDEFINED: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break;
  4183. default: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break;
  4184. }
  4185. }
  4186. }
  4187. GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
  4188. // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
  4189. if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
  4190. try {
  4191. vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
  4192. } catch (const std::exception & e) {
4193. LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.\n", __func__, e.what());
  4194. vocab.linefeed_id = vocab.special_pad_id;
  4195. }
  4196. } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
  4197. vocab.linefeed_id = vocab.special_pad_id;
  4198. } else {
  4199. const std::vector<int> ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A
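// "\xC4\x8A" is the UTF-8 encoding of U+010A ('Ċ'), the byte-level BPE
// representation of the newline byte 0x0A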
  4200. GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
  4201. vocab.linefeed_id = ids[0];
  4202. }
  4203. // special tokens
  4204. {
  4205. const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
  4206. { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
  4207. { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
  4208. { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
  4209. { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
  4210. { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
  4211. { LLM_KV_TOKENIZER_CLS_ID, vocab.special_cls_id },
  4212. { LLM_KV_TOKENIZER_MASK_ID, vocab.special_mask_id },
  4213. { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_prefix_id },
  4214. { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_suffix_id },
  4215. { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_middle_id },
  4216. { LLM_KV_TOKENIZER_EOT_ID, vocab.special_eot_id },
  4217. };
  4218. for (const auto & it : special_token_types) {
  4219. const std::string & key = kv(std::get<0>(it));
  4220. int32_t & id = std::get<1>(it);
  4221. uint32_t new_id;
  4222. if (!ml.get_key(std::get<0>(it), new_id, false)) {
  4223. continue;
  4224. }
  4225. if (new_id >= vocab.id_to_token.size()) {
4226. LLAMA_LOG_WARN("%s: bad special token: '%s' = %u, using default id %d\n",
  4227. __func__, key.c_str(), new_id, id);
  4228. } else {
  4229. id = new_id;
  4230. }
  4231. }
  4232. // Handle add_bos_token and add_eos_token
  4233. {
  4234. bool temp = true;
  4235. if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
  4236. vocab.special_add_bos = int(temp);
  4237. }
  4238. if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
  4239. vocab.special_add_eos = int(temp);
  4240. }
  4241. }
  4242. // find EOT token: "<|eot_id|>", "<|im_end|>", "<end_of_turn>", etc.
  4243. //
  4244. // TODO: convert scripts should provide this token through the KV metadata LLAMA_KV_TOKENIZER_EOT_ID
  4245. // for now, we apply this workaround to find the EOT token based on its text
  4246. if (vocab.special_eot_id == -1) {
  4247. for (const auto & t : vocab.token_to_id) {
  4248. if (
  4249. // TODO: gemma "<end_of_turn>" is exported as a normal token, so the following check does not work
  4250. // need to fix convert script
  4251. //vocab.id_to_token[t.second].type == LLAMA_TOKEN_TYPE_CONTROL &&
  4252. (t.first == "<|eot_id|>" ||
  4253. t.first == "<|im_end|>" ||
  4254. t.first == "<|end|>" ||
  4255. t.first == "<end_of_turn>" ||
  4256. t.first == "<|endoftext|>"
  4257. )
  4258. ) {
  4259. vocab.special_eot_id = t.second;
  4260. break;
  4261. }
  4262. }
  4263. }
  4264. }
  4265. // build special tokens cache
  4266. {
  4267. for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
  4268. if (!(vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL)) {
  4269. vocab.cache_special_tokens.push_back(id);
  4270. }
  4271. }
  4272. std::sort( vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
  4273. [&] (const llama_vocab::id a, const llama_vocab::id b) {
  4274. return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
  4275. }
  4276. );
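// sorted by descending token text length so that longer special tokens are
// matched before shorter ones when partitioning text around special tokens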
  4277. LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
  4278. }
  4279. // build token to piece cache
  4280. {
  4281. size_t size_cache = 0;
  4282. std::vector<llama_vocab::token> cache_token_to_piece(n_vocab);
  4283. for (uint32_t id = 0; id < n_vocab; ++id) {
  4284. cache_token_to_piece[id] = llama_token_to_piece(&model, id, true);
  4285. size_cache += cache_token_to_piece[id].size();
  4286. }
  4287. std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
4288. LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MiB\n", __func__, size_cache / 1024.0 / 1024.0);
  4289. }
  4290. // Handle per token attributes
  4291. //NOTE: Each model customizes per token attributes.
  4292. //NOTE: Per token attributes are missing from the GGUF file.
  4293. //TODO: Extract attributes from GGUF file.
  4294. {
  4295. auto _contains_any = [] (const std::string &str, const std::vector<std::string> &substrs) -> bool {
  4296. for (auto substr : substrs) {
4297. if (str.find(substr) != std::string::npos) {
  4298. return true;
  4299. }
  4300. }
  4301. return false;
  4302. };
  4303. auto _set_tokenid_attr = [&] (const llama_vocab::id id, llama_token_attr attr, bool value) {
  4304. uint32_t current = vocab.id_to_token.at(id).attr;
  4305. current = value ? (current | attr) : (current & ~attr);
  4306. vocab.id_to_token[id].attr = (llama_token_attr) current;
  4307. };
  4308. auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
  4309. _set_tokenid_attr(vocab.token_to_id.at(token), attr, value);
  4310. };
  4311. std::string model_name;
  4312. std::string tokenizer_pre;
  4313. ml.get_key(LLM_KV_GENERAL_NAME, model_name, false);
  4314. ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
  4315. // model name to lowercase
  4316. std::transform(model_name.begin(), model_name.end(), model_name.begin(),
  4317. [] (const std::string::value_type x) {
  4318. return std::tolower(x);
  4319. }
  4320. );
  4321. // set attributes by model/tokenizer name
  4322. if (_contains_any(tokenizer_pre, {"jina-v2-es", "jina-v2-de"})) {
  4323. _set_token_attr("<mask>", LLAMA_TOKEN_ATTR_LSTRIP, true);
  4324. } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
  4325. for (auto id : vocab.cache_special_tokens) {
  4326. _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
  4327. }
  4328. for (auto token : {"</s>"}) {
  4329. _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
  4330. }
  4331. for (auto token : {"<unk>", "<s>", "<|endoftext|>"}) {
  4332. _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
  4333. }
  4334. }
  4335. }
  4336. }
  4337. static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
  4338. const auto & hparams = model.hparams;
  4339. const auto & vocab = model.vocab;
  4340. const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
  4341. // hparams
  4342. LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
  4343. LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch));
  4344. LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, llama_model_vocab_type_name(vocab.type));
  4345. LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
  4346. LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
  4347. LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
  4348. LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
  4349. LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
  4350. LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
  4351. LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
  4352. LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot);
  4353. LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k);
  4354. LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v);
  4355. LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
  4356. LLAMA_LOG_INFO("%s: n_embd_k_gqa = %u\n", __func__, hparams.n_embd_k_gqa());
  4357. LLAMA_LOG_INFO("%s: n_embd_v_gqa = %u\n", __func__, hparams.n_embd_v_gqa());
  4358. LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
  4359. LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
  4360. LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
  4361. LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
  4362. LLAMA_LOG_INFO("%s: f_logit_scale = %.1e\n", __func__, hparams.f_logit_scale);
  4363. LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
  4364. LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
  4365. LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
  4366. LLAMA_LOG_INFO("%s: causal attn = %d\n", __func__, hparams.causal_attn);
  4367. LLAMA_LOG_INFO("%s: pooling type = %d\n", __func__, hparams.pooling_type);
  4368. LLAMA_LOG_INFO("%s: rope type = %d\n", __func__, hparams.rope_type);
  4369. LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type);
  4370. LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
  4371. LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
  4372. LLAMA_LOG_INFO("%s: n_ctx_orig_yarn = %u\n", __func__, hparams.n_ctx_orig_yarn);
  4373. LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
  4374. LLAMA_LOG_INFO("%s: ssm_d_conv = %u\n", __func__, hparams.ssm_d_conv);
  4375. LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
  4376. LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state);
  4377. LLAMA_LOG_INFO("%s: ssm_dt_rank = %u\n", __func__, hparams.ssm_dt_rank);
  4378. LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
  4379. LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
  4380. if (ml.n_elements >= 1e12) {
  4381. LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, ml.n_elements*1e-12);
  4382. } else if (ml.n_elements >= 1e9) {
  4383. LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
  4384. } else if (ml.n_elements >= 1e6) {
  4385. LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, ml.n_elements*1e-6);
  4386. } else {
  4387. LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
  4388. }
  4389. if (ml.n_bytes < GiB) {
  4390. LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  4391. } else {
  4392. LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
  4393. }
  4394. // general kv
  4395. LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
  4396. // special tokens
  4397. if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
  4398. if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
  4399. if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
  4400. if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
  4401. if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
  4402. if (vocab.special_cls_id != -1) { LLAMA_LOG_INFO( "%s: CLS token = %d '%s'\n", __func__, vocab.special_cls_id, vocab.id_to_token[vocab.special_cls_id].text.c_str() ); }
  4403. if (vocab.special_mask_id != -1) { LLAMA_LOG_INFO( "%s: MASK token = %d '%s'\n", __func__, vocab.special_mask_id, vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
  4404. if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
  4405. if (vocab.special_prefix_id != -1) { LLAMA_LOG_INFO( "%s: PRE token = %d '%s'\n", __func__, vocab.special_prefix_id, vocab.id_to_token[vocab.special_prefix_id].text.c_str() ); }
  4406. if (vocab.special_suffix_id != -1) { LLAMA_LOG_INFO( "%s: SUF token = %d '%s'\n", __func__, vocab.special_suffix_id, vocab.id_to_token[vocab.special_suffix_id].text.c_str() ); }
  4407. if (vocab.special_middle_id != -1) { LLAMA_LOG_INFO( "%s: MID token = %d '%s'\n", __func__, vocab.special_middle_id, vocab.id_to_token[vocab.special_middle_id].text.c_str() ); }
  4408. if (vocab.special_eot_id != -1) { LLAMA_LOG_INFO( "%s: EOT token = %d '%s'\n", __func__, vocab.special_eot_id, vocab.id_to_token[vocab.special_eot_id].text.c_str() ); }
  4409. if (model.arch == LLM_ARCH_DEEPSEEK2) {
  4410. LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
  4411. LLAMA_LOG_INFO("%s: n_lora_q = %d\n", __func__, hparams.n_lora_q);
  4412. LLAMA_LOG_INFO("%s: n_lora_kv = %d\n", __func__, hparams.n_lora_kv);
  4413. LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
  4414. LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
  4415. LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
  4416. LLAMA_LOG_INFO("%s: rope_yarn_log_mul = %.4f\n", __func__, hparams.rope_yarn_log_mul);
  4417. }
  4418. }
  4419. // Returns false if cancelled by progress_callback
  4420. static bool llm_load_tensors(
  4421. llama_model_loader & ml,
  4422. llama_model & model,
  4423. int n_gpu_layers,
  4424. enum llama_split_mode split_mode,
  4425. int main_gpu,
  4426. const float * tensor_split,
  4427. bool use_mlock,
  4428. llama_progress_callback progress_callback,
  4429. void * progress_callback_user_data) {
  4430. model.t_start_us = ggml_time_us();
  4431. auto & hparams = model.hparams;
  4432. #ifdef GGML_USE_SYCL
  4433. // disable MoE with SYCL until mul_mat_id is updated
  4434. if (hparams.n_expert > 0) {
  4435. n_gpu_layers = 0;
  4436. }
  4437. #endif
  4438. model.split_mode = split_mode;
  4439. model.main_gpu = main_gpu;
  4440. model.n_gpu_layers = n_gpu_layers;
  4441. const int64_t n_layer = hparams.n_layer;
  4442. const int64_t i_gpu_start = std::max((int64_t) hparams.n_layer - n_gpu_layers, (int64_t) 0);
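// layers [0, i_gpu_start) stay on the CPU; e.g. with n_layer = 32 and
// n_gpu_layers = 20, i_gpu_start = 12 and layers 12..31 are offloaded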
  4443. bool use_mmap_buffer = true;
  4444. // there is very little benefit to offloading the input layer, so always keep it on the CPU
  4445. model.buft_input = llama_default_buffer_type_cpu(true);
  4446. //model.buft_input = llama_default_buffer_type_offload(main_gpu);
  4447. model.buft_layer.resize(n_layer);
  4448. // assign cpu layers
  4449. for (int64_t i = 0; i < i_gpu_start; ++i) {
  4450. model.buft_layer[i] = llama_default_buffer_type_cpu(true);
  4451. }
  4452. if (split_mode == LLAMA_SPLIT_MODE_LAYER) {
  4453. // calculate the split points
  4454. int device_count = llama_get_device_count(model);
  4455. bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; });
  4456. std::vector<float> splits(device_count);
  4457. if (all_zero) {
  4458. // default split, by free memory
  4459. for (int i = 0; i < device_count; ++i) {
  4460. splits[i] = llama_get_device_memory(model, i);
  4461. }
  4462. } else {
  4463. std::copy(tensor_split, tensor_split + device_count, splits.begin());
  4464. }
  4465. // sum and normalize the splits to get the split points
  4466. float split_sum = 0.0f;
  4467. for (int i = 0; i < device_count; ++i) {
  4468. split_sum += splits[i];
  4469. splits[i] = split_sum;
  4470. }
  4471. for (int i = 0; i < device_count; ++i) {
  4472. splits[i] /= split_sum;
  4473. }
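// splits[] now holds cumulative fractions ending at 1.0; e.g. two devices with
// equal free memory (or equal user-provided splits) yield {0.5, 1.0}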
  4474. // assign the repeating layers to the devices according to the splits
  4475. int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1);
  4476. for (int64_t i = i_gpu_start; i < n_layer; ++i) {
  4477. int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits.begin();
  4478. model.buft_layer[i] = llama_default_buffer_type_offload(model, layer_gpu);
  4479. }
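// upper_bound selects the first device whose cumulative fraction exceeds the
// layer's relative position among the offloaded layers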
  4480. // assign the output layer
  4481. if (n_gpu_layers > n_layer) {
  4482. int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits.begin();
  4483. model.buft_output = llama_default_buffer_type_offload(model, layer_gpu);
  4484. } else {
  4485. model.buft_output = llama_default_buffer_type_cpu(true);
  4486. }
  4487. } else {
  4488. ggml_backend_buffer_type_t split_buft;
  4489. if (split_mode == LLAMA_SPLIT_MODE_ROW) {
  4490. split_buft = llama_default_buffer_type_split(model, main_gpu, tensor_split);
  4491. } else {
  4492. // LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_LAYER in backends where it is not supported
  4493. split_buft = llama_default_buffer_type_offload(model, main_gpu);
  4494. }
  4495. // assign the repeating layers
  4496. for (int64_t i = i_gpu_start; i < n_layer; ++i) {
  4497. model.buft_layer[i] = {
  4498. split_buft,
  4499. llama_default_buffer_type_offload(model, main_gpu)
  4500. };
  4501. }
  4502. // assign the output layer
  4503. if (n_gpu_layers > n_layer) {
  4504. model.buft_output = {
  4505. split_buft,
  4506. llama_default_buffer_type_offload(model, main_gpu)
  4507. };
  4508. } else {
  4509. model.buft_output = llama_default_buffer_type_cpu(true);
  4510. }
  4511. }
  4512. // count used buffer types
  4513. std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
  4514. buft_layer_count[model.buft_input.buft]++;
  4515. buft_layer_count[model.buft_input.buft_matrix]++;
  4516. buft_layer_count[model.buft_output.buft]++;
  4517. buft_layer_count[model.buft_output.buft_matrix]++;
  4518. for (int64_t i = 0; i < n_layer; ++i) {
  4519. buft_layer_count[model.buft_layer[i].buft]++;
  4520. buft_layer_count[model.buft_layer[i].buft_matrix]++;
  4521. }
  4522. // create one context per buffer type
  4523. size_t ctx_size = ggml_tensor_overhead()*(ml.n_tensors + 1); // +1 for models where tok_embd is duplicated as output
  4524. // for moe merged tensors
  4525. ctx_size += ggml_tensor_overhead()*n_layer*3;
  4526. std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
  4527. for (auto & it : buft_layer_count) {
  4528. struct ggml_init_params params = {
  4529. /*.mem_size =*/ ctx_size,
  4530. /*.mem_buffer =*/ NULL,
  4531. /*.no_alloc =*/ true,
  4532. };
  4533. ggml_context * ctx = ggml_init(params);
  4534. if (!ctx) {
  4535. throw std::runtime_error(format("failed to create context"));
  4536. }
  4537. ctx_map[it.first] = ctx;
  4538. model.ctxs.push_back(ctx);
  4539. }
  4540. LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
  4541. // create tensors for the weights
  4542. {
  4543. const int64_t n_embd = hparams.n_embd;
  4544. const int64_t n_embd_head = n_embd / hparams.n_head;
  4545. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  4546. const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  4547. const int64_t n_embd_gqa = n_embd_v_gqa;
  4548. const int64_t n_vocab = hparams.n_vocab;
  4549. const int64_t n_vocab_type = hparams.n_vocab_type;
  4550. const int64_t n_ff = hparams.n_ff;
  4551. const int64_t n_expert = hparams.n_expert;
  4552. if (n_expert > 0 && hparams.n_expert_used == 0) {
  4553. throw std::runtime_error("model has expert layers but no expert layers are used");
  4554. }
  4555. ggml_context * ctx_input = ctx_map.at(model.buft_input.buft);
  4556. ggml_context * ctx_output = ctx_map.at(model.buft_output.buft);
  4557. ggml_context * ctx_output_split = ctx_map.at(model.buft_output.buft_matrix);
  4558. auto ctx_for_layer = [&](int i) { return ctx_map.at(model.buft_layer[i].buft); };
  4559. auto ctx_for_layer_split = [&](int i) { return ctx_map.at(model.buft_layer[i].buft_matrix); };
  4560. model.layers.resize(n_layer);
  4561. const auto tn = LLM_TN(model.arch);
  4562. switch (model.arch) {
  4563. case LLM_ARCH_LLAMA:
  4564. case LLM_ARCH_REFACT:
  4565. case LLM_ARCH_MINICPM:
  4566. {
  4567. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4568. // output
  4569. {
  4570. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4571. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4572. // if output is NULL, init from the input tok embed
  4573. if (model.output == NULL) {
  4574. model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  4575. }
  4576. }
  4577. for (int i = 0; i < n_layer; ++i) {
  4578. ggml_context * ctx_layer = ctx_for_layer(i);
  4579. ggml_context * ctx_split = ctx_for_layer_split(i);
  4580. auto & layer = model.layers[i];
  4581. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4582. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  4583. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  4584. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  4585. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4586. // optional bias tensors
  4587. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4588. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4589. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4590. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4591. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  4592. if (n_expert == 0) {
  4593. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  4594. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  4595. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4596. // optional MLP bias
  4597. layer.ffn_gate_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4598. layer.ffn_down_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4599. layer.ffn_up_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4600. } else {
  4601. layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
  4602. layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4603. if (layer.ffn_gate_exps) {
  4604. layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert});
  4605. layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
  4606. } else {
  4607. // merge split expert into a single tensor for compatibility with older models
  4608. // requires disabling mmap
  4609. use_mmap_buffer = false;
  4610. ggml_type type_gate = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, 0).c_str())->type;
  4611. ggml_type type_down = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, 0).c_str())->type;
  4612. ggml_type type_up = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, 0).c_str())->type;
  4613. layer.ffn_gate_exps = ggml_new_tensor_3d(ctx_split, type_gate, n_embd, n_ff, n_expert);
  4614. layer.ffn_down_exps = ggml_new_tensor_3d(ctx_split, type_down, n_ff, n_embd, n_expert);
  4615. layer.ffn_up_exps = ggml_new_tensor_3d(ctx_split, type_up, n_embd, n_ff, n_expert);
  4616. ggml_set_name(layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i).c_str());
  4617. ggml_set_name(layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i).c_str());
  4618. ggml_set_name(layer.ffn_up_exps, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i).c_str());
  4619. for (uint32_t x = 0; x < n_expert; ++x) {
  4620. // the individual experts are loaded into a view of the merged tensor
  4621. ml.create_tensor_as_view(ctx_split, layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_gate_exps->nb[2]*x);
  4622. ml.create_tensor_as_view(ctx_split, layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd }, layer.ffn_down_exps->nb[2]*x);
  4623. ml.create_tensor_as_view(ctx_split, layer.ffn_up_exps, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_up_exps->nb[2]*x);
  4624. }
  4625. }
  4626. }
  4627. }
  4628. } break;
  4629. case LLM_ARCH_GROK:
  4630. {
  4631. if (n_expert == 0) {
  4632. throw std::runtime_error("Grok model cannot have zero experts");
  4633. }
  4634. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4635. // output
  4636. {
  4637. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4638. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4639. // if output is NULL, init from the input tok embed
  4640. if (model.output == NULL) {
  4641. model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  4642. }
  4643. }
  4644. for (int i = 0; i < n_layer; ++i) {
  4645. ggml_context * ctx_layer = ctx_for_layer(i);
  4646. ggml_context * ctx_split = ctx_for_layer_split(i);
  4647. auto & layer = model.layers[i];
  4648. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4649. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  4650. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  4651. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  4652. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4653. layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
  4654. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  4655. layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
  4656. layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4657. if (layer.ffn_gate_exps) {
  4658. layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert});
  4659. layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
  4660. } else {
  4661. // merge split expert into a single tensor for compatibility with older models
  4662. // requires disabling mmap
  4663. use_mmap_buffer = false;
  4664. ggml_type type_gate = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, 0).c_str())->type;
  4665. ggml_type type_down = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, 0).c_str())->type;
  4666. ggml_type type_up = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, 0).c_str())->type;
  4667. layer.ffn_gate_exps = ggml_new_tensor_3d(ctx_split, type_gate, n_embd, n_ff, n_expert);
  4668. layer.ffn_down_exps = ggml_new_tensor_3d(ctx_split, type_down, n_ff, n_embd, n_expert);
  4669. layer.ffn_up_exps = ggml_new_tensor_3d(ctx_split, type_up, n_embd, n_ff, n_expert);
  4670. ggml_set_name(layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i).c_str());
  4671. ggml_set_name(layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i).c_str());
  4672. ggml_set_name(layer.ffn_up_exps, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i).c_str());
  4673. for (uint32_t x = 0; x < n_expert; ++x) {
  4674. // the individual experts are loaded into a view of the merged tensor
  4675. ml.create_tensor_as_view(ctx_split, layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_gate_exps->nb[2]*x);
  4676. ml.create_tensor_as_view(ctx_split, layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd }, layer.ffn_down_exps->nb[2]*x);
  4677. ml.create_tensor_as_view(ctx_split, layer.ffn_up_exps, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_up_exps->nb[2]*x);
  4678. }
  4679. }
  4680. layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
  4681. }
  4682. } break;
  4683. case LLM_ARCH_DBRX:
  4684. {
  4685. if (n_expert == 0) {
  4686. throw std::runtime_error("DBRX model cannot have zero experts");
  4687. }
  4688. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4689. // output
  4690. {
  4691. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4692. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  4693. }
  4694. for (int i = 0; i < n_layer; ++i) {
  4695. ggml_context * ctx_layer = ctx_for_layer(i);
  4696. ggml_context * ctx_split = ctx_for_layer_split(i);
  4697. auto & layer = model.layers[i];
  4698. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4699. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  4700. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4701. layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
  4702. layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
  4703. layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert});
  4704. layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert});
  4705. layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
  4706. }
  4707. } break;
  4708. case LLM_ARCH_BAICHUAN:
  4709. {
  4710. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4711. {
  4712. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4713. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  4714. }
  4715. for (int i = 0; i < n_layer; ++i) {
  4716. ggml_context * ctx_layer = ctx_for_layer(i);
  4717. ggml_context * ctx_split = ctx_for_layer_split(i);
  4718. auto & layer = model.layers[i];
  4719. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4720. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  4721. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  4722. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  4723. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4724. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  4725. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  4726. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  4727. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4728. }
  4729. } break;
  4730. case LLM_ARCH_FALCON:
  4731. {
  4732. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4733. // output
  4734. {
  4735. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4736. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  4737. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4738. if (!model.output) {
  4739. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // needs to be on GPU
  4740. }
  4741. }
  4742. for (int i = 0; i < n_layer; ++i) {
  4743. ggml_context * ctx_layer = ctx_for_layer(i);
  4744. ggml_context * ctx_split = ctx_for_layer_split(i);
  4745. auto & layer = model.layers[i];
  4746. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4747. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
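// the second pre-FFN norm only exists in some Falcon variants (e.g. the 40B-style layout), hence the two optional tensors below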
  4748. layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4749. layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4750. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  4751. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4752. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  4753. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4754. }
  4755. } break;
  4756. case LLM_ARCH_STARCODER:
  4757. {
  4758. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4759. model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
  4760. // output
  4761. {
  4762. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4763. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  4764. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4765. if (!model.output) {
  4766. // needs to be on GPU
  4767. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  4768. }
  4769. }
  4770. for (int i = 0; i < n_layer; ++i) {
  4771. ggml_context * ctx_layer = ctx_for_layer(i);
  4772. ggml_context * ctx_split = ctx_for_layer_split(i);
  4773. auto & layer = model.layers[i];
  4774. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4775. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  4776. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  4777. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  4778. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4779. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  4780. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  4781. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  4782. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  4783. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  4784. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4785. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  4786. }
  4787. } break;
  4788. case LLM_ARCH_BERT:
  4789. case LLM_ARCH_NOMIC_BERT:
  4790. {
  4791. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4792. model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type});
  4793. if (model.arch == LLM_ARCH_BERT) {
  4794. model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
  4795. }
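// only classic BERT has learned absolute position embeddings; NOMIC_BERT checkpoints carry no pos_embd tensor (they presumably encode positions with RoPE instead)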
  4796. model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
  4797. model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});
  4798. for (int i = 0; i < n_layer; ++i) {
  4799. ggml_context * ctx_layer = ctx_for_layer(i);
  4800. ggml_context * ctx_split = ctx_for_layer_split(i);
  4801. auto & layer = model.layers[i];
  4802. if (model.arch == LLM_ARCH_BERT) {
  4803. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  4804. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  4805. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  4806. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  4807. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  4808. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
  4809. } else {
  4810. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  4811. }
  4812. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4813. layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
  4814. layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd});
  4815. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4816. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  4817. if (model.arch == LLM_ARCH_BERT) {
  4818. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  4819. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  4820. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  4821. } else {
  4822. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  4823. }
  4824. layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
  4825. layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd});
  4826. }
  4827. } break;
  4828. case LLM_ARCH_JINA_BERT_V2:
  4829. {
  4830. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // word_embeddings
4831. model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type}); // token_type_embeddings
4832. model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}); // LayerNorm
4833. model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}); // LayerNorm bias
  4834. for (int i = 0; i < n_layer; ++i) {
  4835. ggml_context * ctx_layer = ctx_for_layer(i);
  4836. ggml_context * ctx_split = ctx_for_layer_split(i);
  4837. auto & layer = model.layers[i]; // JinaBertLayer
  4838. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  4839. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  4840. layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4841. layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4842. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  4843. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  4844. layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4845. layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4846. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  4847. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
4848. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); // output_dense
4849. layer.bo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}); // output_dense bias
4850. layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}); // output_norm
  4851. layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd});
  4852. layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4853. layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
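// unlike classic BERT above, Jina BERT v2 uses a gated FFN, so an extra gate projection is loaded alongside up/down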
  4854. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4855. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  4856. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  4857. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  4858. layer.layer_out_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
  4859. layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd});
  4860. }
  4861. } break;
  4862. case LLM_ARCH_BLOOM:
  4863. {
  4864. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4865. model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
  4866. model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});
  4867. // output
  4868. {
  4869. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4870. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  4871. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  4872. }
  4873. for (int i = 0; i < n_layer; ++i) {
  4874. ggml_context * ctx_layer = ctx_for_layer(i);
  4875. ggml_context * ctx_split = ctx_for_layer_split(i);
  4876. auto & layer = model.layers[i];
  4877. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4878. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  4879. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  4880. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  4881. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4882. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  4883. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  4884. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  4885. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  4886. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  4887. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4888. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  4889. }
  4890. } break;
  4891. case LLM_ARCH_MPT:
  4892. {
  4893. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
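// the learned position embeddings are optional: most MPT checkpoints rely on ALiBi and ship no pos_embd tensor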
  4894. model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4895. // output
  4896. {
  4897. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4898. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4899. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4900. if (!model.output) {
  4901. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // needs to be on GPU
  4902. }
  4903. }
  4904. for (int i = 0; i < n_layer; ++i) {
  4905. ggml_context * ctx_layer = ctx_for_layer(i);
  4906. ggml_context * ctx_split = ctx_for_layer_split(i);
  4907. auto & layer = model.layers[i];
  4908. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4909. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4910. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  4911. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4912. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4913. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4914. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  4915. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4916. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  4917. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4918. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4919. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4920. layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4921. layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4922. layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4923. layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4924. // AWQ ScaleActivation layer
  4925. layer.ffn_act = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4926. }
  4927. } break;
  4928. case LLM_ARCH_STABLELM:
  4929. {
  4930. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4931. // output
  4932. {
  4933. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  4934. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4935. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  4936. }
  4937. for (int i = 0; i < n_layer; ++i) {
  4938. ggml_context * ctx_layer = ctx_for_layer(i);
  4939. ggml_context * ctx_split = ctx_for_layer_split(i);
  4940. auto & layer = model.layers[i];
  4941. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4942. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  4943. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  4944. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  4945. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  4946. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4947. // optional bias tensors, present in Stable LM 2 1.6B
  4948. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4949. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4950. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4951. // optional q and k layernorms, present in StableLM 2 12B
  4952. layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4953. layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head_kv}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4954. // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
  4955. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4956. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4957. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  4958. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  4959. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  4960. }
  4961. } break;
  4962. case LLM_ARCH_QWEN:
  4963. {
  4964. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4965. // output
  4966. {
  4967. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4968. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  4969. }
  4970. for (int i = 0; i < n_layer; ++i) {
  4971. ggml_context * ctx_layer = ctx_for_layer(i);
  4972. ggml_context * ctx_split = ctx_for_layer_split(i);
  4973. auto & layer = model.layers[i];
  4974. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  4975. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3});
  4976. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3});
  4977. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  4978. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
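// note the FFN projections use n_ff/2: for this arch the stored n_ff hyperparameter appears to be twice the actual intermediate width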
  4979. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2});
  4980. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd});
  4981. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2});
  4982. }
  4983. } break;
  4984. case LLM_ARCH_QWEN2:
  4985. {
  4986. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  4987. // output
  4988. {
  4989. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  4990. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  4991. // if output is NULL, init from the input tok embed
  4992. if (model.output == NULL) {
  4993. model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  4994. }
  4995. }
  4996. for (int i = 0; i < n_layer; ++i) {
  4997. ggml_context * ctx_layer = ctx_for_layer(i);
  4998. ggml_context * ctx_split = ctx_for_layer_split(i);
  4999. auto & layer = model.layers[i];
  5000. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5001. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5002. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5003. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5004. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
5005. // bias tensors (required here: Qwen2 attention uses Q/K/V biases)
  5006. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  5007. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  5008. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
  5009. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5010. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5011. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5012. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5013. }
  5014. } break;
  5015. case LLM_ARCH_QWEN2MOE:
  5016. {
  5017. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5018. // output
  5019. {
  5020. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5021. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5022. }
  5023. for (int i = 0; i < n_layer; ++i) {
  5024. ggml_context * ctx_layer = ctx_for_layer(i);
  5025. ggml_context * ctx_split = ctx_for_layer_split(i);
  5026. auto & layer = model.layers[i];
  5027. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5028. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5029. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5030. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5031. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
5032. // bias tensors (required here: Qwen2-MoE attention uses Q/K/V biases)
  5033. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  5034. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  5035. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
  5036. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5037. layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
  5038. GGML_ASSERT(hparams.n_expert > 0);
  5039. GGML_ASSERT(hparams.n_expert_used > 0);
  5040. // MoE branch
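// per-expert FFN width: the stored n_ff divided by the number of experts used per token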
  5041. auto n_ff_exp = n_ff / hparams.n_expert_used;
  5042. layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
  5043. layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert});
  5044. layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
  5045. // Shared expert branch
  5046. layer.ffn_gate_inp_shexp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd});
  5047. layer.ffn_gate_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff});
  5048. layer.ffn_down_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff, n_embd});
  5049. layer.ffn_up_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff});
  5050. }
  5051. } break;
  5052. case LLM_ARCH_PHI2:
  5053. {
  5054. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5055. // output
  5056. {
  5057. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5058. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  5059. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5060. model.output_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab});
  5061. }
  5062. for (int i = 0; i < n_layer; ++i) {
  5063. ggml_context * ctx_layer = ctx_for_layer(i);
  5064. ggml_context * ctx_split = ctx_for_layer_split(i);
  5065. auto & layer = model.layers[i];
  5066. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5067. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  5068. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
  5069. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
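// some Phi-2 conversions store a fused QKV tensor, others separate Q/K/V projections; fall back to the split tensors when the fused one is missing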
  5070. if (layer.wqkv == nullptr) {
  5071. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5072. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  5073. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5074. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  5075. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5076. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
  5077. }
  5078. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5079. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  5080. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  5081. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  5082. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5083. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  5084. }
  5085. } break;
  5086. case LLM_ARCH_PHI3:
  5087. {
  5088. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab });
  5089. // output
  5090. {
  5091. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd });
  5092. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab });
  5093. }
  5094. for (int i = 0; i < n_layer; ++i) {
5095. ggml_context * ctx_layer = ctx_for_layer(i);
5096. ggml_context * ctx_split = ctx_for_layer_split(i);
  5097. auto & layer = model.layers[i];
  5098. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd });
  5099. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
  5100. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd });
  5101. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd });
  5102. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd });
  5103. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff });
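// long/short RoPE scaling factors are shared by all layers; for layers after the first they are flagged as duplicates of the same shared tensor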
  5104. layer.rope_long = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight"), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
  5105. layer.rope_short = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight"), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
  5106. }
  5107. } break;
  5108. case LLM_ARCH_PLAMO:
  5109. {
  5110. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5111. // output
  5112. {
  5113. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5114. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5115. }
  5116. for (int i = 0; i < n_layer; ++i) {
  5117. ggml_context * ctx_layer = ctx_for_layer(i);
  5118. ggml_context * ctx_split = ctx_for_layer_split(i);
  5119. auto & layer = model.layers[i];
  5120. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5121. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5122. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5123. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5124. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5125. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5126. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5127. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5128. }
  5129. } break;
  5130. case LLM_ARCH_GPT2:
  5131. {
  5132. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5133. model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train});
  5134. // output
  5135. {
  5136. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5137. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  5138. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5139. }
  5140. for (int i = 0; i < n_layer; ++i) {
  5141. ggml_context * ctx_layer = ctx_for_layer(i);
  5142. ggml_context * ctx_split = ctx_for_layer_split(i);
  5143. auto & layer = model.layers[i];
  5144. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5145. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  5146. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  5147. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  5148. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5149. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  5150. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5151. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  5152. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  5153. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  5154. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5155. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  5156. }
  5157. } break;
  5158. case LLM_ARCH_CODESHELL:
  5159. {
  5160. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5161. // output
  5162. {
  5163. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5164. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  5165. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5166. }
  5167. for (int i = 0; i < n_layer; ++i) {
  5168. ggml_context * ctx_layer = ctx_for_layer(i);
  5169. ggml_context * ctx_split = ctx_for_layer_split(i);
  5170. auto & layer = model.layers[i];
  5171. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5172. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  5173. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  5174. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  5175. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5176. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  5177. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5178. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  5179. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  5180. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  5181. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5182. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  5183. }
  5184. } break;
  5185. case LLM_ARCH_ORION:
  5186. {
  5187. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5188. {
  5189. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5190. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  5191. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5192. }
  5193. for (int i = 0; i < n_layer; ++i) {
  5194. ggml_context * ctx_layer = ctx_for_layer(i);
  5195. ggml_context * ctx_split = ctx_for_layer_split(i);
  5196. auto & layer = model.layers[i];
  5197. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5198. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  5199. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5200. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5201. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5202. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5203. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5204. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  5205. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5206. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5207. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5208. }
  5209. } break;
  5210. case LLM_ARCH_INTERNLM2:
  5211. {
  5212. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5213. // output
  5214. {
  5215. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5216. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5217. }
  5218. for (int i = 0; i < n_layer; ++i) {
  5219. ggml_context * ctx_layer = ctx_for_layer(i);
  5220. ggml_context * ctx_split = ctx_for_layer_split(i);
  5221. auto & layer = model.layers[i];
  5222. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5223. // layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  5224. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5225. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5226. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5227. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5228. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5229. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5230. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5231. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5232. }
  5233. } break;
  5234. case LLM_ARCH_GEMMA:
  5235. {
  5236. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5237. // output
  5238. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5239. model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
  5240. const int64_t n_ff = hparams.n_ff;
  5241. const int64_t n_embd_head_k = hparams.n_embd_head_k;
  5242. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  5243. const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
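// Gemma's attention width (n_embd_head_k * n_head) need not equal n_embd, so Q and the output projection are sized from the head dimensions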
  5244. for (uint32_t i = 0; i < n_layer; ++i) {
  5245. ggml_context * ctx_layer = ctx_for_layer(i);
  5246. ggml_context * ctx_split = ctx_for_layer_split(i);
  5247. auto & layer = model.layers[i];
  5248. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5249. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * hparams.n_head});
  5250. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
  5251. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
  5252. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * hparams.n_head, n_embd});
  5253. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5254. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5255. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5256. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5257. }
  5258. } break;
  5259. case LLM_ARCH_STARCODER2:
  5260. {
  5261. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5262. // output
  5263. {
  5264. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5265. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  5266. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  5267. // if output is NULL, init from the input tok embed
  5268. if (model.output == NULL) {
  5269. model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  5270. }
  5271. }
  5272. for (int i = 0; i < n_layer; ++i) {
  5273. ggml_context * ctx_layer = ctx_for_layer(i);
  5274. ggml_context * ctx_split = ctx_for_layer_split(i);
  5275. auto & layer = model.layers[i];
  5276. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5277. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  5278. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5279. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5280. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5281. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5282. // optional bias tensors
  5283. layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
  5284. layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
  5285. layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
  5286. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  5287. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5288. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  5289. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5290. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5291. // optional bias tensors
  5292. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  5293. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP , "bias", i), { n_ff});
  5294. }
  5295. } break;
  5296. case LLM_ARCH_MAMBA:
  5297. {
  5298. const int64_t d_conv = hparams.ssm_d_conv;
  5299. const int64_t d_inner = hparams.ssm_d_inner;
  5300. const int64_t d_state = hparams.ssm_d_state;
  5301. const int64_t dt_rank = hparams.ssm_dt_rank;
  5302. // only an expansion factor of 2 is supported for now
  5303. GGML_ASSERT(2 * n_embd == d_inner);
  5304. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5305. // output
  5306. {
  5307. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5308. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  5309. // if output is NULL, init from the input tok embed, duplicated to allow offloading
  5310. if (model.output == NULL) {
  5311. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  5312. }
  5313. }
  5314. for (int i = 0; i < n_layer; ++i) {
  5315. ggml_context * ctx_layer = ctx_for_layer(i);
  5316. ggml_context * ctx_split = ctx_for_layer_split(i);
  5317. auto & layer = model.layers[i];
  5318. // norm
  5319. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5320. layer.ssm_in = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner});
  5321. layer.ssm_conv1d = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner});
  5322. layer.ssm_conv1d_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner});
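// the ssm_x projection yields dt_rank columns for the step size plus d_state columns each for the B and C state-space matrices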
  5323. layer.ssm_x = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state});
  5324. layer.ssm_dt = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner});
  5325. layer.ssm_dt_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner});
  5326. // no "weight" suffix for these
  5327. layer.ssm_a = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner});
  5328. layer.ssm_d = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_D, i), {d_inner});
  5329. // out_proj
  5330. layer.ssm_out = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd});
  5331. }
  5332. } break;
  5333. case LLM_ARCH_XVERSE:
  5334. {
  5335. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5336. {
  5337. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5338. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5339. }
  5340. for (int i = 0; i < n_layer; ++i) {
  5341. ggml_context * ctx_layer = ctx_for_layer(i);
  5342. ggml_context * ctx_split = ctx_for_layer_split(i);
  5343. auto & layer = model.layers[i];
  5344. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5345. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5346. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5347. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5348. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5349. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5350. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5351. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5352. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5353. }
  5354. } break;
  5355. case LLM_ARCH_COMMAND_R:
  5356. {
  5357. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5358. // output
  5359. {
  5360. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5361. // init output from the input tok embed
  5362. model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  5363. }
  5364. for (int i = 0; i < n_layer; ++i) {
  5365. ggml_context * ctx_layer = ctx_for_layer(i);
  5366. ggml_context * ctx_split = ctx_for_layer_split(i);
  5367. auto & layer = model.layers[i];
  5368. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
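// per-head Q/K norms are only present in the larger Command R checkpoints; the layer count serves as a heuristic to detect them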
5369. if (n_layer >= 64) {
  5370. layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head});
  5371. layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head_kv});
  5372. }
  5373. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5374. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5375. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5376. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5377. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5378. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5379. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5380. }
  5381. } break;
  5382. case LLM_ARCH_OLMO: // adapted from LLM_ARCH_LLAMA with norm params removed
  5383. {
  5384. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5385. // output
  5386. {
  5387. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  5388. // if output is NULL, init from the input tok embed
  5389. if (model.output == NULL) {
  5390. model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  5391. }
  5392. }
  5393. for (int i = 0; i < n_layer; ++i) {
  5394. ggml_context * ctx_split = ctx_for_layer_split(i);
  5395. auto & layer = model.layers[i];
  5396. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5397. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5398. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5399. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5400. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5401. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5402. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5403. }
  5404. } break;
  5405. case LLM_ARCH_GPTNEOX:
  5406. {
  5407. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5408. // output
  5409. {
  5410. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5411. model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
  5412. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5413. }
  5414. for (int i = 0; i < n_layer; ++i) {
  5415. ggml_context * ctx_layer = ctx_for_layer(i);
  5416. ggml_context * ctx_split = ctx_for_layer_split(i);
  5417. auto & layer = model.layers[i];
  5418. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5419. layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
  5420. layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
  5421. layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
  5422. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5423. layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
  5424. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5425. layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
  5426. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
  5427. layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
  5428. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5429. layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
  5430. }
  5431. } break;
  5432. case LLM_ARCH_ARCTIC:
  5433. {
  5434. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5435. // output
  5436. {
  5437. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5438. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
  5439. // if output is NULL, init from the input tok embed
  5440. if (model.output == NULL) {
  5441. model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
  5442. }
  5443. }
  5444. for (int i = 0; i < n_layer; ++i) {
  5445. ggml_context * ctx_layer = ctx_for_layer(i);
  5446. ggml_context * ctx_split = ctx_for_layer_split(i);
  5447. auto & layer = model.layers[i];
  5448. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5449. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
  5450. layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
  5451. layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
  5452. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
  5453. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5454. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_embd});
  5455. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_embd, n_embd});
  5456. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_embd});
  5457. layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
  5458. layer.ffn_norm_exps = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM_EXPS, "weight", i), {n_embd});
  5459. layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, false);
  5460. layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert});
  5461. layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
  5462. }
  5463. } break;
  5464. case LLM_ARCH_DEEPSEEK2:
  5465. {
  5466. bool is_lite = (hparams.n_layer == 27);
  5467. const uint32_t n_embd_head_qk_rope = hparams.n_rot;
  5468. const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  5469. const uint32_t q_lora_rank = hparams.n_lora_q;
  5470. const uint32_t kv_lora_rank = hparams.n_lora_kv;
  5471. const uint32_t n_ff_exp = hparams.n_ff_exp;
  5472. model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
  5473. // output
  5474. {
  5475. model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
  5476. model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
  5477. }
  5478. for (int i = 0; i < n_layer; ++i) {
  5479. ggml_context * ctx_layer = ctx_for_layer(i);
  5480. ggml_context * ctx_split = ctx_for_layer_split(i);
  5481. auto & layer = model.layers[i];
  5482. layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
  5483. if (!is_lite) {
  5484. layer.attn_q_a_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank});
  5485. }
  5486. layer.attn_kv_a_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank});
  5487. if (!is_lite) {
  5488. layer.wq_a = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank});
  5489. layer.wq_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, hparams.n_head * hparams.n_embd_head_k});
  5490. } else {
  5491. layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa});
  5492. }
  5493. layer.wkv_a_mqa = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + n_embd_head_qk_rope});
  5494. layer.wkv_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, hparams.n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)});
  5495. layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {hparams.n_head * hparams.n_embd_head_v, n_embd});
  5496. layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
  5497. if ((uint32_t) i < hparams.n_layer_dense_lead) {
  5498. layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
  5499. layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
  5500. layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
  5501. } else {
  5502. layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
  5503. GGML_ASSERT(hparams.n_expert > 0);
  5504. GGML_ASSERT(hparams.n_expert_used > 0);
  5505. // MoE branch
  5506. layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
  5507. layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert});
  5508. layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
  5509. // Shared expert branch
  5510. layer.ffn_gate_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * hparams.n_expert_shared});
  5511. layer.ffn_down_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * hparams.n_expert_shared, n_embd});
  5512. layer.ffn_up_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * hparams.n_expert_shared});
  5513. }
  5514. }
  5515. } break;
  5516. default:
  5517. throw std::runtime_error("unknown architecture");
  5518. }
  5519. }
  5520. ml.done_getting_tensors();
  5521. ml.init_mappings(true, use_mlock ? &model.mlock_mmaps : nullptr);
  5522. model.mappings.reserve(ml.mappings.size());
  5523. // create the backend buffers
  5524. std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_bufs;
  5525. ctx_bufs.reserve(ctx_map.size());
  5526. // Ensure we have enough capacity for the maximum backend buffer we will potentially create
  5527. size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
  5528. model.bufs.reserve(n_max_backend_buffer);
  5529. for (auto & it : ctx_map) {
  5530. ggml_backend_buffer_type_t buft = it.first;
  5531. ggml_context * ctx = it.second;
  5532. llama_buf_map bufs;
  5533. bufs.reserve(n_max_backend_buffer);
  5534. // only the mmap region containing the tensors in the model is mapped to the backend buffer
5535. // this is important for Metal on Apple silicon: if the entire model could be mapped to a single Metal buffer, we could simply use Metal for all layers
5536. // mapping only the required region allows partial offloading when the model size exceeds the Metal buffer size, but not the RAM size
  5537. if (ml.use_mmap && use_mmap_buffer && buft == llama_default_buffer_type_cpu(true)) {
  5538. for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
  5539. void * addr = nullptr;
  5540. size_t first, last;
  5541. ml.get_mapping_range(&first, &last, &addr, idx, ctx);
  5542. if (first >= last) {
  5543. continue;
  5544. }
  5545. ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr((char *) addr + first, last - first);
  5546. if (buf == nullptr) {
  5547. throw std::runtime_error("unable to allocate backend CPU buffer");
  5548. }
  5549. model.bufs.push_back(buf);
  5550. bufs.emplace(idx, buf);
  5551. #ifdef GGML_USE_CUDA
  5552. if (n_layer >= n_gpu_layers) {
  5553. ggml_backend_cuda_register_host_buffer(
  5554. ggml_backend_buffer_get_base(buf),
  5555. ggml_backend_buffer_get_size(buf));
  5556. }
  5557. #endif
  5558. }
  5559. }
  5560. #ifdef GGML_USE_METAL
  5561. else if (ml.use_mmap && use_mmap_buffer && buft == ggml_backend_metal_buffer_type()) {
  5562. for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
  5563. const size_t max_size = ggml_get_max_tensor_size(ctx);
  5564. void * addr = nullptr;
  5565. size_t first, last;
  5566. ml.get_mapping_range(&first, &last, &addr, idx, ctx);
  5567. if (first >= last) {
  5568. continue;
  5569. }
  5570. ggml_backend_buffer_t buf = ggml_backend_metal_buffer_from_ptr((char *) addr + first, last - first, max_size);
  5571. if (buf == nullptr) {
  5572. throw std::runtime_error("unable to allocate backend metal buffer");
  5573. }
  5574. model.bufs.push_back(buf);
  5575. bufs.emplace(idx, buf);
  5576. }
  5577. }
  5578. #endif
  5579. else {
  5580. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
  5581. if (buf == nullptr) {
  5582. throw std::runtime_error("unable to allocate backend buffer");
  5583. }
  5584. model.bufs.push_back(buf);
  5585. if (use_mlock && ggml_backend_buffer_is_host(buf)) {
  5586. model.mlock_bufs.emplace_back(new llama_mlock);
  5587. auto & mlock_buf = model.mlock_bufs.back();
  5588. mlock_buf->init (ggml_backend_buffer_get_base(buf));
  5589. mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
  5590. }
  5591. for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
  5592. bufs.emplace(idx, buf);
  5593. }
  5594. }
  5595. if (bufs.empty()) {
  5596. throw std::runtime_error("failed to allocate buffer");
  5597. }
  5598. for (auto & buf : bufs) {
  5599. // indicate that this buffer contains weights
  5600. // this is used by ggml_backend_sched to improve op scheduling -> ops that use a weight are preferably scheduled to the backend that contains the weight
  5601. ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
  5602. }
  5603. ctx_bufs.emplace_back(ctx, bufs);
  5604. }
  5605. if (llama_supports_gpu_offload()) {
  5606. const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
  5607. LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
  5608. if (n_gpu_layers > (int) hparams.n_layer) {
  5609. LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
  5610. }
  5611. const int max_backend_supported_layers = hparams.n_layer + 1;
  5612. const int max_offloadable_layers = hparams.n_layer + 1;
  5613. LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
  5614. }
  5615. // print memory requirements
  5616. for (ggml_backend_buffer_t buf : model.bufs) {
  5617. LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
  5618. }
  5619. // populate tensors_by_name
  5620. for (ggml_context * ctx : model.ctxs) {
  5621. for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
  5622. model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
  5623. }
  5624. }
  5625. // load tensor data
  5626. for (auto & it : ctx_bufs) {
  5627. ggml_context * ctx = it.first;
  5628. auto & bufs = it.second;
  5629. if (!ml.load_all_data(ctx, bufs, use_mlock ? &model.mlock_mmaps : NULL, progress_callback, progress_callback_user_data)) {
  5630. return false;
  5631. }
  5632. }
  5633. if (use_mmap_buffer) {
  5634. for (auto & mapping : ml.mappings) {
  5635. model.mappings.emplace_back(std::move(mapping));
  5636. }
  5637. }
5638. // loading time will be recalculated after the first eval, so
  5639. // we take page faults deferred by mmap() into consideration
  5640. model.t_load_us = ggml_time_us() - model.t_start_us;
  5641. return true;
  5642. }
  5643. // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
  5644. static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
  5645. try {
  5646. llama_model_loader ml(fname, params.use_mmap, params.check_tensors, params.kv_overrides);
  5647. model.hparams.vocab_only = params.vocab_only;
  5648. try {
  5649. llm_load_arch(ml, model);
  5650. } catch(const std::exception & e) {
  5651. throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
  5652. }
  5653. try {
  5654. llm_load_hparams(ml, model);
  5655. } catch(const std::exception & e) {
  5656. throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
  5657. }
  5658. try {
  5659. llm_load_vocab(ml, model);
  5660. } catch(const std::exception & e) {
  5661. throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
  5662. }
  5663. llm_load_print_meta(ml, model);
  5664. if (model.vocab.type != LLAMA_VOCAB_TYPE_NONE &&
  5665. model.hparams.n_vocab != model.vocab.id_to_token.size()) {
  5666. throw std::runtime_error("vocab size mismatch");
  5667. }
  5668. if (params.vocab_only) {
  5669. LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
  5670. return 0;
  5671. }
  5672. #ifdef GGML_USE_KOMPUTE
  5673. if (params.n_gpu_layers > 0 && (
  5674. !(model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON)
  5675. || !(
  5676. model.ftype == LLAMA_FTYPE_ALL_F32 ||
  5677. model.ftype == LLAMA_FTYPE_MOSTLY_F16 ||
  5678. model.ftype == LLAMA_FTYPE_MOSTLY_BF16 ||
  5679. model.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
  5680. model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1
  5681. )
  5682. )) {
  5683. // TODO(cebtenzzre): propagate this error outside of llama_load_model_from_file
  5684. LLAMA_LOG_WARN("%s: disabling Kompute due to unsupported model arch or quantization\n", __func__);
  5685. params.n_gpu_layers = 0;
  5686. }
  5687. #endif
  5688. #ifdef GGML_USE_SYCL
  5689. if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
  5690. ggml_backend_sycl_set_single_device_mode(params.main_gpu);
5691. // SYCL uses device indices (0, 1, 2, ...) directly; the user passes a device id, so convert it to a device index.
  5692. params.main_gpu = ggml_backend_sycl_get_device_index(params.main_gpu);
  5693. } else {
  5694. ggml_backend_sycl_set_mul_device_mode();
  5695. }
  5696. #endif
  5697. if (!llm_load_tensors(
  5698. ml, model, params.n_gpu_layers, params.split_mode, params.main_gpu, params.tensor_split, params.use_mlock,
  5699. params.progress_callback, params.progress_callback_user_data
  5700. )) {
  5701. return -2;
  5702. }
  5703. } catch (const std::exception & err) {
  5704. LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
  5705. return -1;
  5706. }
  5707. return 0;
  5708. }
  5709. //
  5710. // llm_build
  5711. //
  5712. using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;
  5713. enum llm_ffn_op_type {
  5714. LLM_FFN_SILU,
  5715. LLM_FFN_GELU,
  5716. LLM_FFN_RELU,
  5717. LLM_FFN_RELU_SQR,
  5718. };
  5719. enum llm_ffn_gate_type {
  5720. LLM_FFN_SEQ,
  5721. LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
  5722. };
  5723. enum llm_norm_type {
  5724. LLM_NORM,
  5725. LLM_NORM_RMS,
  5726. };
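// builds the graph input embeddings: either looks up rows of tok_embd for the token ids
// in the batch, or passes the externally provided embeddings through unchanged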
  5727. static struct ggml_tensor * llm_build_inp_embd(
  5728. struct ggml_context * ctx,
  5729. struct llama_context & lctx,
  5730. const llama_hparams & hparams,
  5731. const llama_batch & batch,
  5732. struct ggml_tensor * tok_embd,
  5733. const llm_build_cb & cb) {
  5734. const int64_t n_embd = hparams.n_embd;
  5735. struct ggml_tensor * inpL;
  5736. if (batch.token) {
  5737. lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
  5738. cb(lctx.inp_tokens, "inp_tokens", -1);
  5739. ggml_set_input(lctx.inp_tokens);
  5740. inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens);
  5741. } else {
  5742. lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
  5743. inpL = lctx.inp_embd;
  5744. ggml_set_input(lctx.inp_embd);
  5745. }
  5746. cb(inpL, "inp_embd", -1);
  5747. return inpL;
  5748. }
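// copies the current K and V tensors of layer il into the KV cache at offset kv_head;
// K is stored as provided by the caller (already RoPE-ed), while V is stored transposed
// unless flash attention is enabled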
  5749. static void llm_build_kv_store(
  5750. struct ggml_context * ctx,
  5751. const llama_hparams & hparams,
  5752. const llama_cparams & cparams,
  5753. const llama_kv_cache & kv,
  5754. struct ggml_cgraph * graph,
  5755. struct ggml_tensor * k_cur,
  5756. struct ggml_tensor * v_cur,
  5757. int32_t n_tokens,
  5758. int32_t kv_head,
  5759. const llm_build_cb & cb,
  5760. int64_t il) {
  5761. const int64_t n_ctx = cparams.n_ctx;
  5762. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  5763. const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  5764. GGML_ASSERT(kv.size == n_ctx);
  5765. struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa,
  5766. (ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa))*kv_head);
  5767. cb(k_cache_view, "k_cache_view", il);
  5768. // note: storing RoPE-ed version of K in the KV cache
  5769. ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
  5770. assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens);
  5771. struct ggml_tensor * v_cache_view = nullptr;
  5772. if (cparams.flash_attn) {
  5773. v_cache_view = ggml_view_1d(ctx, kv.v_l[il], n_tokens*n_embd_v_gqa,
  5774. (kv_head)*ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa));
  5775. } else {
  5776. // note: the V cache is transposed when not using flash attention
  5777. v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa,
  5778. ( n_ctx)*ggml_element_size(kv.v_l[il]),
  5779. (kv_head)*ggml_element_size(kv.v_l[il]));
  5780. v_cur = ggml_transpose(ctx, v_cur);
  5781. }
  5782. cb(v_cache_view, "v_cache_view", il);
  5783. ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur, v_cache_view));
  5784. }
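// applies LayerNorm (LLM_NORM) or RMSNorm (LLM_NORM_RMS), then optionally scales by the
// weight mw and adds the bias mb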
  5785. static struct ggml_tensor * llm_build_norm(
  5786. struct ggml_context * ctx,
  5787. struct ggml_tensor * cur,
  5788. const llama_hparams & hparams,
  5789. struct ggml_tensor * mw,
  5790. struct ggml_tensor * mb,
  5791. llm_norm_type type,
  5792. const llm_build_cb & cb,
  5793. int il) {
  5794. switch (type) {
  5795. case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break;
  5796. case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
  5797. }
  5798. if (mw || mb) {
  5799. cb(cur, "norm", il);
  5800. }
  5801. if (mw) {
  5802. cur = ggml_mul(ctx, cur, mw);
  5803. if (mb) {
  5804. cb(cur, "norm_w", il);
  5805. }
  5806. }
  5807. if (mb) {
  5808. cur = ggml_add(ctx, cur, mb);
  5809. }
  5810. return cur;
  5811. }
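// generic feed-forward block: up projection (+ optional bias), optional gate applied either
// after the up projection (LLM_FFN_SEQ) or in parallel to it (LLM_FFN_PAR), the selected
// activation, and the final down projection (+ optional bias); with LLM_FFN_PAR the
// activated gate is multiplied element-wise with the up projection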
  5812. static struct ggml_tensor * llm_build_ffn(
  5813. struct ggml_context * ctx,
  5814. struct ggml_tensor * cur,
  5815. struct ggml_tensor * up,
  5816. struct ggml_tensor * up_b,
  5817. struct ggml_tensor * gate,
  5818. struct ggml_tensor * gate_b,
  5819. struct ggml_tensor * down,
  5820. struct ggml_tensor * down_b,
  5821. struct ggml_tensor * act_scales,
  5822. llm_ffn_op_type type_op,
  5823. llm_ffn_gate_type type_gate,
  5824. const llm_build_cb & cb,
  5825. int il) {
  5826. struct ggml_tensor * tmp = up ? ggml_mul_mat(ctx, up, cur) : cur;
  5827. cb(tmp, "ffn_up", il);
  5828. if (up_b) {
  5829. tmp = ggml_add(ctx, tmp, up_b);
  5830. cb(tmp, "ffn_up_b", il);
  5831. }
  5832. if (gate) {
  5833. switch (type_gate) {
  5834. case LLM_FFN_SEQ:
  5835. {
  5836. cur = ggml_mul_mat(ctx, gate, tmp);
  5837. cb(cur, "ffn_gate", il);
  5838. } break;
  5839. case LLM_FFN_PAR:
  5840. {
  5841. cur = ggml_mul_mat(ctx, gate, cur);
  5842. cb(cur, "ffn_gate", il);
  5843. } break;
  5844. }
  5845. if (gate_b) {
  5846. cur = ggml_add(ctx, cur, gate_b);
  5847. cb(cur, "ffn_gate_b", il);
  5848. }
  5849. } else {
  5850. cur = tmp;
  5851. }
  5852. switch (type_op) {
  5853. case LLM_FFN_SILU:
  5854. {
  5855. cur = ggml_silu(ctx, cur);
  5856. cb(cur, "ffn_silu", il);
  5857. } break;
  5858. case LLM_FFN_GELU:
  5859. {
  5860. cur = ggml_gelu(ctx, cur);
  5861. cb(cur, "ffn_gelu", il);
  5862. if (act_scales != NULL) {
  5863. cur = ggml_div(ctx, cur, act_scales);
  5864. cb(cur, "ffn_act", il);
  5865. }
  5866. } break;
  5867. case LLM_FFN_RELU:
  5868. {
  5869. cur = ggml_relu(ctx, cur);
  5870. cb(cur, "ffn_relu", il);
  5871. } break;
  5872. case LLM_FFN_RELU_SQR:
  5873. {
  5874. cur = ggml_relu(ctx, cur);
  5875. cb(cur, "ffn_relu", il);
  5876. cur = ggml_sqr(ctx, cur);
  5877. cb(cur, "ffn_sqr(relu)", il);
  5878. } break;
  5879. }
  5880. if (type_gate == LLM_FFN_PAR) {
  5881. cur = ggml_mul(ctx, cur, tmp);
  5882. cb(cur, "ffn_gate_par", il);
  5883. }
  5884. cur = ggml_mul_mat(ctx, down, cur);
  5885. if (down_b) {
  5886. cb(cur, "ffn_down", il);
  5887. }
  5888. if (down_b) {
  5889. cur = ggml_add(ctx, cur, down_b);
  5890. }
  5891. return cur;
  5892. }
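// mixture-of-experts FFN: compute router logits with gate_inp, softmax them into
// probabilities, pick the top n_expert_used experts, optionally renormalize/scale the
// routing weights, run the per-expert up/gate/down projections with ggml_mul_mat_id,
// and return the weighted sum of the selected expert outputs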
  5893. static struct ggml_tensor * llm_build_moe_ffn(
  5894. struct ggml_context * ctx,
  5895. struct ggml_tensor * cur,
  5896. struct ggml_tensor * gate_inp,
  5897. struct ggml_tensor * up_exps,
  5898. struct ggml_tensor * gate_exps,
  5899. struct ggml_tensor * down_exps,
  5900. int64_t n_expert,
  5901. int64_t n_expert_used,
  5902. llm_ffn_op_type type_op,
  5903. bool norm_w,
  5904. bool scale_w,
  5905. float w_scale,
  5906. const llm_build_cb & cb,
  5907. int il) {
  5908. int64_t n_embd = cur->ne[0];
  5909. int64_t n_tokens = cur->ne[1];
  5910. ggml_tensor * logits = ggml_mul_mat(ctx, gate_inp, cur); // [n_expert, n_tokens]
  5911. cb(logits, "ffn_moe_logits", il);
  5912. ggml_tensor * probs = ggml_soft_max(ctx, logits); // [n_expert, n_tokens]
  5913. cb(probs, "ffn_moe_probs", il);
  5914. // select experts
  5915. ggml_tensor * selected_experts = ggml_top_k(ctx, probs, n_expert_used); // [n_expert_used, n_tokens]
  5916. cb(selected_experts->src[0], "ffn_moe_argsort", il);
  5917. cb(selected_experts, "ffn_moe_topk", il);
  5918. ggml_tensor * weights = ggml_get_rows(ctx,
  5919. ggml_reshape_3d(ctx, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
  5920. cb(weights, "ffn_moe_weights", il);
  5921. if (norm_w) {
  5922. weights = ggml_reshape_2d(ctx, weights, n_expert_used, n_tokens);
  5923. ggml_tensor * weights_sum = ggml_sum_rows(ctx, weights); // [1, n_tokens]
  5924. cb(weights_sum, "ffn_moe_weights_sum", il);
  5925. weights = ggml_div(ctx, weights, weights_sum); // [n_expert_used, n_tokens]
  5926. cb(weights, "ffn_moe_weights_norm", il);
  5927. weights = ggml_reshape_3d(ctx, weights, 1, n_expert_used, n_tokens);
  5928. }
  5929. if (scale_w) {
  5930. weights = ggml_scale(ctx, weights, w_scale);
  5931. cb(weights, "ffn_moe_weights_scaled", il);
  5932. }
  5933. cur = ggml_reshape_3d(ctx, cur, n_embd, 1, n_tokens);
  5934. ggml_tensor * up = ggml_mul_mat_id(ctx, up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
  5935. cb(up, "ffn_moe_up", il);
  5936. ggml_tensor * gate = ggml_mul_mat_id(ctx, gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
  5937. cb(gate, "ffn_moe_gate", il);
  5938. switch (type_op) {
  5939. case LLM_FFN_SILU:
  5940. {
  5941. gate = ggml_silu(ctx, gate);
  5942. cb(gate, "ffn_moe_silu", il);
  5943. } break;
  5944. case LLM_FFN_GELU:
  5945. {
  5946. gate = ggml_gelu(ctx, gate);
  5947. cb(gate, "ffn_moe_gelu", il);
  5948. } break;
  5949. default:
  5950. GGML_ASSERT(false);
  5951. }
  5952. ggml_tensor * par = ggml_mul(ctx, up, gate); // [n_ff, n_expert_used, n_tokens]
  5953. cb(par, "ffn_moe_gate_par", il);
  5954. ggml_tensor * experts = ggml_mul_mat_id(ctx, down_exps, par, selected_experts); // [n_embd, n_expert_used, n_tokens]
  5955. cb(experts, "ffn_moe_down", il);
  5956. experts = ggml_mul(ctx, experts, weights);
  5957. // aggregate experts
  5958. ggml_tensor * moe_out = nullptr;
  5959. for (int i = 0; i < n_expert_used; ++i) {
  5960. ggml_tensor * cur_expert = ggml_view_2d(ctx, experts, n_embd, n_tokens,
  5961. experts->nb[2], i*experts->nb[1]);
  5962. if (i == 0) {
  5963. moe_out = cur_expert;
  5964. } else {
  5965. moe_out = ggml_add(ctx, moe_out, cur_expert);
  5966. }
  5967. }
  5968. if (n_expert_used == 1) {
  5969. // avoid returning a non-contiguous tensor
  5970. moe_out = ggml_cont(ctx, moe_out);
  5971. }
  5972. return moe_out;
  5973. }
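// attention read path: with flash attention, calls ggml_flash_attn_ext on the cached K/V;
// otherwise computes KQ, applies the masked softmax (with optional ALiBi bias), multiplies
// by the transposed cached V and merges the heads; both paths finish with the output
// projection wo (+ optional bias)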
  5974. static struct ggml_tensor * llm_build_kqv(
  5975. struct ggml_context * ctx,
  5976. const llama_model & model,
  5977. const llama_hparams & hparams,
  5978. const llama_cparams & cparams,
  5979. const llama_kv_cache & kv,
  5980. struct ggml_cgraph * graph,
  5981. struct ggml_tensor * wo,
  5982. struct ggml_tensor * wo_b,
  5983. struct ggml_tensor * q_cur,
  5984. struct ggml_tensor * kq_mask,
  5985. int32_t n_tokens,
  5986. int32_t n_kv,
  5987. float kq_scale,
  5988. const llm_build_cb & cb,
  5989. int il) {
  5990. const int64_t n_ctx = cparams.n_ctx;
  5991. const int64_t n_head = hparams.n_head;
  5992. const int64_t n_head_kv = hparams.n_head_kv;
  5993. const int64_t n_embd_head_k = hparams.n_embd_head_k;
  5994. const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  5995. const int64_t n_embd_head_v = hparams.n_embd_head_v;
  5996. const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  5997. struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
  5998. cb(q, "q", il);
  5999. struct ggml_tensor * k =
  6000. ggml_view_3d(ctx, kv.k_l[il],
  6001. n_embd_head_k, n_kv, n_head_kv,
  6002. ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
  6003. ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
  6004. 0);
  6005. cb(k, "k", il);
  6006. struct ggml_tensor * cur;
  6007. if (cparams.flash_attn) {
  6008. GGML_UNUSED(model);
  6009. GGML_UNUSED(n_ctx);
  6010. // split cached v into n_head heads (not transposed)
  6011. struct ggml_tensor * v =
  6012. ggml_view_3d(ctx, kv.v_l[il],
  6013. n_embd_head_v, n_kv, n_head_kv,
  6014. ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa),
  6015. ggml_row_size(kv.v_l[il]->type, n_embd_head_v),
  6016. 0);
  6017. cb(v, "v", il);
  6018. cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias);
  6019. if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
  6020. ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);
  6021. }
  6022. cur = ggml_reshape_2d(ctx, cur, n_embd_head_v*n_head, n_tokens);
  6023. } else {
  6024. struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
  6025. cb(kq, "kq", il);
  6026. if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
  6027. // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
  6028. // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
  6029. ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
  6030. }
  6031. if (model.arch == LLM_ARCH_GROK) {
  6032. // need to do the following:
6033. // multiply by attn_output_multiplier of 0.08838834764831845
6034. // and then:
  6035. // kq = 30 * tanh(kq / 30)
  6036. // before the softmax below
  6037. //try from phi2
  6038. //ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
  6039. kq = ggml_tanh(ctx, ggml_scale(ctx, kq, 0.08838834764831845f/30.0f));
  6040. kq = ggml_scale(ctx, kq, 30);
  6041. }
  6042. kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
  6043. cb(kq, "kq_soft_max_ext", il);
  6044. GGML_ASSERT(kv.size == n_ctx);
  6045. // split cached v into n_head heads
  6046. struct ggml_tensor * v =
  6047. ggml_view_3d(ctx, kv.v_l[il],
  6048. n_kv, n_embd_head_v, n_head_kv,
  6049. ggml_element_size(kv.v_l[il])*n_ctx,
  6050. ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v,
  6051. 0);
  6052. cb(v, "v", il);
  6053. struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
  6054. cb(kqv, "kqv", il);
  6055. struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
  6056. cb(kqv_merged, "kqv_merged", il);
  6057. cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_v*n_head, n_tokens);
  6058. cb(cur, "kqv_merged_cont", il);
  6059. }
  6060. ggml_build_forward_expand(graph, cur);
  6061. cur = ggml_mul_mat(ctx, wo, cur);
  6062. if (wo_b) {
  6063. cb(cur, "kqv_wo", il);
  6064. }
  6065. if (wo_b) {
  6066. cur = ggml_add(ctx, cur, wo_b);
  6067. }
  6068. return cur;
  6069. }
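// convenience wrapper: stores the new K/V for this layer in the cache (llm_build_kv_store)
// and then computes the attention output from the cache (llm_build_kqv)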
  6070. static struct ggml_tensor * llm_build_kv(
  6071. struct ggml_context * ctx,
  6072. const llama_model & model,
  6073. const llama_hparams & hparams,
  6074. const llama_cparams & cparams,
  6075. const llama_kv_cache & kv,
  6076. struct ggml_cgraph * graph,
  6077. struct ggml_tensor * wo,
  6078. struct ggml_tensor * wo_b,
  6079. struct ggml_tensor * k_cur,
  6080. struct ggml_tensor * v_cur,
  6081. struct ggml_tensor * q_cur,
  6082. struct ggml_tensor * kq_mask,
  6083. int32_t n_tokens,
  6084. int32_t kv_head,
  6085. int32_t n_kv,
  6086. float kq_scale,
  6087. const llm_build_cb & cb,
  6088. int il) {
  6089. // these nodes are added to the graph together so that they are not reordered
  6090. // by doing so, the number of splits in the graph is reduced
  6091. ggml_build_forward_expand(graph, q_cur);
  6092. ggml_build_forward_expand(graph, k_cur);
  6093. ggml_build_forward_expand(graph, v_cur);
  6094. llm_build_kv_store(ctx, hparams, cparams, kv, graph, k_cur, v_cur, n_tokens, kv_head, cb, il);
  6095. struct ggml_tensor * cur;
  6096. cur = llm_build_kqv(ctx, model, hparams, cparams, kv, graph, wo, wo_b,
  6097. q_cur, kq_mask, n_tokens, n_kv, kq_scale, cb, il);
  6098. cb(cur, "kqv_out", il);
  6099. return cur;
  6100. }
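// collects everything needed to build a compute graph for one batch: the model weights,
// hyper/context parameters, KV cache state and the tensor-naming callback; init() creates
// the no_alloc ggml context ctx0 on top of buf_compute_meta and resets the per-graph inputs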
  6101. struct llm_build_context {
  6102. const llama_model & model;
  6103. llama_context & lctx;
  6104. const llama_hparams & hparams;
  6105. const llama_cparams & cparams;
  6106. const llama_batch & batch;
  6107. const llama_kv_cache & kv_self;
  6108. const int64_t n_embd;
  6109. const int64_t n_layer;
  6110. const int64_t n_rot;
  6111. const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
  6112. const int64_t n_head;
  6113. const int64_t n_head_kv;
  6114. const int64_t n_embd_head_k;
  6115. const int64_t n_embd_k_gqa;
  6116. const int64_t n_embd_head_v;
  6117. const int64_t n_embd_v_gqa;
  6118. const int64_t n_expert;
  6119. const int64_t n_expert_used;
  6120. const float freq_base;
  6121. const float freq_scale;
  6122. const float ext_factor;
  6123. const float attn_factor;
  6124. const float beta_fast;
  6125. const float beta_slow;
  6126. const float norm_eps;
  6127. const float norm_rms_eps;
  6128. const int32_t n_tokens;
  6129. const int32_t n_kv; // size of KV cache to consider (n_kv <= kv_self.size)
  6130. const int32_t n_outputs;
  6131. const int32_t kv_head; // index of where we store new KV data in the cache
  6132. const int32_t n_ctx_orig;
  6133. const bool flash_attn;
  6134. const enum llama_pooling_type pooling_type;
  6135. const enum llama_rope_type rope_type;
  6136. const llm_build_cb & cb;
  6137. std::vector<uint8_t> & buf_compute_meta;
  6138. struct ggml_context * ctx0 = nullptr;
  6139. // TODO: consider making the entire interface noexcept
  6140. llm_build_context(
  6141. llama_context & lctx,
  6142. const llama_batch & batch,
  6143. const llm_build_cb & cb,
  6144. bool worst_case) :
  6145. model (lctx.model),
  6146. lctx (lctx),
  6147. hparams (model.hparams),
  6148. cparams (lctx.cparams),
  6149. batch (batch),
  6150. kv_self (lctx.kv_self),
  6151. n_embd (hparams.n_embd),
  6152. n_layer (hparams.n_layer),
  6153. n_rot (hparams.n_rot),
  6154. n_ctx (cparams.n_ctx),
  6155. n_head (hparams.n_head),
  6156. n_head_kv (hparams.n_head_kv),
  6157. n_embd_head_k (hparams.n_embd_head_k),
  6158. n_embd_k_gqa (hparams.n_embd_k_gqa()),
  6159. n_embd_head_v (hparams.n_embd_head_v),
  6160. n_embd_v_gqa (hparams.n_embd_v_gqa()),
  6161. n_expert (hparams.n_expert),
  6162. n_expert_used (hparams.n_expert_used),
  6163. freq_base (cparams.rope_freq_base),
  6164. freq_scale (cparams.rope_freq_scale),
  6165. ext_factor (cparams.yarn_ext_factor),
  6166. attn_factor (cparams.yarn_attn_factor),
  6167. beta_fast (cparams.yarn_beta_fast),
  6168. beta_slow (cparams.yarn_beta_slow),
  6169. norm_eps (hparams.f_norm_eps),
  6170. norm_rms_eps (hparams.f_norm_rms_eps),
  6171. n_tokens (batch.n_tokens),
  6172. n_kv (worst_case ? kv_self.size : kv_self.n),
  6173. n_outputs (worst_case ? n_tokens : lctx.n_outputs),
  6174. kv_head (worst_case ? (kv_self.recurrent ? 0 : kv_self.size - n_tokens) : kv_self.head),
  6175. n_ctx_orig (cparams.n_ctx_orig_yarn),
  6176. flash_attn (cparams.flash_attn),
  6177. pooling_type (cparams.pooling_type),
  6178. rope_type (hparams.rope_type),
  6179. cb (cb),
  6180. buf_compute_meta (lctx.buf_compute_meta) {
  6181. // all initializations should be done in init()
  6182. }
  6183. void init() {
  6184. struct ggml_init_params params = {
  6185. /*.mem_size =*/ buf_compute_meta.size(),
  6186. /*.mem_buffer =*/ buf_compute_meta.data(),
  6187. /*.no_alloc =*/ true,
  6188. };
  6189. ctx0 = ggml_init(params);
  6190. lctx.inp_tokens = nullptr;
  6191. lctx.inp_embd = nullptr;
  6192. lctx.inp_pos = nullptr;
  6193. lctx.inp_out_ids = nullptr;
  6194. lctx.inp_KQ_mask = nullptr;
  6195. lctx.inp_K_shift = nullptr;
  6196. lctx.inp_mean = nullptr;
  6197. lctx.inp_cls = nullptr;
  6198. lctx.inp_s_copy = nullptr;
  6199. lctx.inp_s_mask = nullptr;
  6200. lctx.inp_s_seq = nullptr;
  6201. }
  6202. void free() {
  6203. if (ctx0) {
  6204. ggml_free(ctx0);
  6205. ctx0 = nullptr;
  6206. }
  6207. }
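// graph that rotates the cached K tensors of every layer by the per-cell shifts in
// inp_K_shift (only the first n_rot dimensions are rotated), e.g. after the KV cache
// has been shifted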
  6208. struct ggml_cgraph * build_k_shift() {
  6209. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6210. GGML_ASSERT(kv_self.size == n_ctx);
  6211. lctx.inp_K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx);
  6212. cb(lctx.inp_K_shift, "K_shift", -1);
  6213. ggml_set_input(lctx.inp_K_shift);
  6214. for (int il = 0; il < n_layer; ++il) {
  6215. struct ggml_tensor * rope_factors = build_rope_factors(il);
  6216. struct ggml_tensor * tmp =
  6217. // we rotate only the first n_rot dimensions
  6218. ggml_rope_ext_inplace(ctx0,
  6219. ggml_view_3d(ctx0, kv_self.k_l[il],
  6220. n_embd_head_k, n_head_kv, n_ctx,
  6221. ggml_row_size(kv_self.k_l[il]->type, n_embd_head_k),
  6222. ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
  6223. 0),
  6224. lctx.inp_K_shift, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6225. ext_factor, attn_factor, beta_fast, beta_slow);
  6226. cb(tmp, "K_shifted", il);
  6227. ggml_build_forward_expand(gf, tmp);
  6228. }
  6229. return gf;
  6230. }
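// graph that rearranges the recurrent-state caches (conv and ssm states) according to the
// cell indices in inp_s_copy; only applicable when kv_self.recurrent is set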
  6231. struct ggml_cgraph * build_s_copy() {
  6232. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6233. GGML_ASSERT(kv_self.recurrent);
  6234. struct ggml_tensor * state_copy = build_inp_s_copy();
  6235. for (int il = 0; il < n_layer; ++il) {
  6236. struct ggml_tensor * conv_states = ggml_reshape_2d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s(), kv_self.size);
  6237. struct ggml_tensor * ssm_states = ggml_reshape_2d(ctx0, kv_self.v_l[il], hparams.n_embd_v_s(), kv_self.size);
  6238. conv_states = ggml_get_rows(ctx0, conv_states, state_copy);
  6239. ssm_states = ggml_get_rows(ctx0, ssm_states, state_copy);
  6240. // TODO: name the intermediate tensors with cb()
  6241. ggml_build_forward_expand(gf, ggml_cpy(ctx0, conv_states, kv_self.k_l[il]));
  6242. ggml_build_forward_expand(gf, ggml_cpy(ctx0, ssm_states, kv_self.v_l[il]));
  6243. }
  6244. return gf;
  6245. }
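// graph that defragments the KV cache: for each run of cells whose destination indices in
// ids are contiguous, copies the corresponding K and V views to their new positions
// (the V layout differs depending on whether flash attention is used)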
  6246. struct ggml_cgraph * build_defrag(const std::vector<uint32_t> & ids) {
  6247. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6248. for (uint32_t i = 0; i < ids.size(); ++i) {
  6249. const uint32_t id = ids[i];
  6250. if (i == id || id == ids.size()) {
  6251. continue;
  6252. }
  6253. uint32_t nm = 1;
  6254. while (i + nm < ids.size() && ids[i + nm] == id + nm) {
  6255. nm++;
  6256. }
  6257. for (int il = 0; il < n_layer; ++il) {
  6258. ggml_tensor * view_k_src = ggml_view_2d(ctx0, kv_self.k_l[il],
  6259. n_embd_k_gqa, nm,
  6260. ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
  6261. ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*i));
  6262. ggml_tensor * view_k_dst = ggml_view_2d(ctx0, kv_self.k_l[il],
  6263. n_embd_k_gqa, nm,
  6264. ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
  6265. ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*id));
  6266. ggml_tensor * view_v_src;
  6267. ggml_tensor * view_v_dst;
  6268. if (flash_attn) {
  6269. // NOTE: the V cache is not transposed when using flash attention
  6270. view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il],
  6271. n_embd_v_gqa, nm,
  6272. ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa),
  6273. ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*i));
  6274. view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il],
  6275. n_embd_v_gqa, nm,
  6276. ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa),
  6277. ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*id));
  6278. } else {
  6279. view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il],
  6280. nm, n_embd_v_gqa,
  6281. ggml_row_size(kv_self.v_l[il]->type, kv_self.size),
  6282. ggml_row_size(kv_self.v_l[il]->type, i));
  6283. view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il],
  6284. nm, n_embd_v_gqa,
  6285. ggml_row_size(kv_self.v_l[il]->type, kv_self.size),
  6286. ggml_row_size(kv_self.v_l[il]->type, id));
  6287. }
  6288. ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_k_src, view_k_dst));
  6289. ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_v_src, view_v_dst));
  6290. }
  6291. i += nm - 1;
  6292. }
  6293. //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes);
  6294. return gf;
  6295. }
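// the build_inp_* helpers below create and register the per-graph input tensors
// (positions, output ids, KQ mask, pooling inputs and recurrent-state inputs)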
  6296. struct ggml_tensor * build_inp_pos() {
  6297. lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  6298. cb(lctx.inp_pos, "inp_pos", -1);
  6299. ggml_set_input(lctx.inp_pos);
  6300. return lctx.inp_pos;
  6301. }
  6302. struct ggml_tensor * build_rope_factors(int il) {
  6303. // choose long/short freq factors based on the context size
  6304. const auto n_ctx_pre_seq = cparams.n_ctx / cparams.n_seq_max;
  6305. if (n_ctx_pre_seq > hparams.n_ctx_orig_yarn) {
  6306. return model.layers[il].rope_long;
  6307. }
  6308. return model.layers[il].rope_short;
  6309. }
  6310. struct ggml_tensor * build_inp_out_ids() {
  6311. lctx.inp_out_ids = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
  6312. cb(lctx.inp_out_ids, "inp_out_ids", -1);
  6313. ggml_set_input(lctx.inp_out_ids);
  6314. return lctx.inp_out_ids;
  6315. }
  6316. struct ggml_tensor * build_inp_KQ_mask(bool causal = true) {
  6317. if (causal) {
  6318. lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
  6319. } else {
  6320. lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
  6321. }
  6322. cb(lctx.inp_KQ_mask, "KQ_mask", -1);
  6323. ggml_set_input(lctx.inp_KQ_mask);
  6324. return flash_attn ? ggml_cast(ctx0, lctx.inp_KQ_mask, GGML_TYPE_F16) : lctx.inp_KQ_mask;
  6325. }
  6326. struct ggml_tensor * build_inp_mean() {
  6327. lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
  6328. cb(lctx.inp_mean, "inp_mean", -1);
  6329. ggml_set_input(lctx.inp_mean);
  6330. return lctx.inp_mean;
  6331. }
  6332. struct ggml_tensor * build_inp_cls() {
  6333. lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
  6334. cb(lctx.inp_cls, "inp_cls", -1);
  6335. ggml_set_input(lctx.inp_cls);
  6336. return lctx.inp_cls;
  6337. }
  6338. struct ggml_tensor * build_inp_s_copy() {
  6339. lctx.inp_s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, kv_self.size);
  6340. cb(lctx.inp_s_copy, "inp_s_copy", -1);
  6341. ggml_set_input(lctx.inp_s_copy);
  6342. return lctx.inp_s_copy;
  6343. }
  6344. struct ggml_tensor * build_inp_s_mask() {
  6345. lctx.inp_s_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv);
  6346. cb(lctx.inp_s_mask, "inp_s_mask", -1);
  6347. ggml_set_input(lctx.inp_s_mask);
  6348. return lctx.inp_s_mask;
  6349. }
  6350. struct ggml_tensor * build_inp_s_seq() {
  6351. lctx.inp_s_seq = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
  6352. cb(lctx.inp_s_seq, "inp_s_seq", -1);
  6353. ggml_set_input(lctx.inp_s_seq);
  6354. return lctx.inp_s_seq;
  6355. }
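// graph for LLaMA-style models: per layer RMSNorm -> QKV projections (with optional biases)
// -> RoPE on Q/K -> attention via llm_build_kv -> residual add, followed by either a dense
// SiLU-gated FFN or a MoE FFN (when ffn_gate_inp is present) and an optional control-vector
// addition; a final RMSNorm and the lm_head projection close the graph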
  6356. struct ggml_cgraph * build_llama() {
  6357. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6358. // mutable variable, needed during the last layer of the computation to skip unused tokens
  6359. int32_t n_tokens = this->n_tokens;
  6360. const int64_t n_embd_head = hparams.n_embd_head_v;
  6361. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6362. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6363. struct ggml_tensor * cur;
  6364. struct ggml_tensor * inpL;
  6365. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  6366. // inp_pos - contains the positions
  6367. struct ggml_tensor * inp_pos = build_inp_pos();
  6368. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  6369. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  6370. for (int il = 0; il < n_layer; ++il) {
  6371. struct ggml_tensor * inpSA = inpL;
  6372. // norm
  6373. cur = llm_build_norm(ctx0, inpL, hparams,
  6374. model.layers[il].attn_norm, NULL,
  6375. LLM_NORM_RMS, cb, il);
  6376. cb(cur, "attn_norm", il);
  6377. // self-attention
  6378. {
  6379. // compute Q and K and RoPE them
  6380. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  6381. cb(Qcur, "Qcur", il);
  6382. if (model.layers[il].bq) {
  6383. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  6384. cb(Qcur, "Qcur", il);
  6385. }
  6386. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  6387. cb(Kcur, "Kcur", il);
  6388. if (model.layers[il].bk) {
  6389. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  6390. cb(Kcur, "Kcur", il);
  6391. }
  6392. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  6393. cb(Vcur, "Vcur", il);
  6394. if (model.layers[il].bv) {
  6395. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  6396. cb(Vcur, "Vcur", il);
  6397. }
  6398. Qcur = ggml_rope_ext(
  6399. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  6400. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6401. ext_factor, attn_factor, beta_fast, beta_slow
  6402. );
  6403. cb(Qcur, "Qcur", il);
  6404. Kcur = ggml_rope_ext(
  6405. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  6406. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6407. ext_factor, attn_factor, beta_fast, beta_slow
  6408. );
  6409. cb(Kcur, "Kcur", il);
  6410. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  6411. model.layers[il].wo, model.layers[il].bo,
  6412. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  6413. }
  6414. if (il == n_layer - 1) {
  6415. // skip computing output for unused tokens
  6416. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  6417. n_tokens = n_outputs;
  6418. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6419. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6420. }
  6421. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6422. cb(ffn_inp, "ffn_inp", il);
  6423. // feed-forward network
  6424. if (model.layers[il].ffn_gate_inp == nullptr) {
  6425. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  6426. model.layers[il].ffn_norm, NULL,
  6427. LLM_NORM_RMS, cb, il);
  6428. cb(cur, "ffn_norm", il);
  6429. cur = llm_build_ffn(ctx0, cur,
  6430. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  6431. model.layers[il].ffn_gate, model.layers[il].ffn_gate_b,
  6432. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  6433. NULL,
  6434. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  6435. cb(cur, "ffn_out", il);
  6436. } else {
  6437. // MoE branch
  6438. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  6439. model.layers[il].ffn_norm, NULL,
  6440. LLM_NORM_RMS, cb, il);
  6441. cb(cur, "ffn_norm", il);
  6442. cur = llm_build_moe_ffn(ctx0, cur,
  6443. model.layers[il].ffn_gate_inp,
  6444. model.layers[il].ffn_up_exps,
  6445. model.layers[il].ffn_gate_exps,
  6446. model.layers[il].ffn_down_exps,
  6447. n_expert, n_expert_used,
  6448. LLM_FFN_SILU, true,
  6449. false, 0.0,
  6450. cb, il);
  6451. cb(cur, "ffn_moe_out", il);
  6452. }
  6453. cur = ggml_add(ctx0, cur, ffn_inp);
  6454. cb(cur, "ffn_out", il);
  6455. ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
  6456. if (layer_dir != nullptr) {
  6457. cur = ggml_add(ctx0, cur, layer_dir);
  6458. }
  6459. cb(cur, "l_out", il);
  6460. // input for next layer
  6461. inpL = cur;
  6462. }
  6463. cur = inpL;
  6464. cur = llm_build_norm(ctx0, cur, hparams,
  6465. model.output_norm, NULL,
  6466. LLM_NORM_RMS, cb, -1);
  6467. cb(cur, "result_norm", -1);
  6468. // lm_head
  6469. cur = ggml_mul_mat(ctx0, model.output, cur);
  6470. cb(cur, "result_output", -1);
  6471. ggml_build_forward_expand(gf, cur);
  6472. return gf;
  6473. }
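// same overall structure as build_llama, but without QKV biases or MoE; Baichuan-7B applies
// RoPE to Q/K while Baichuan-13B only reshapes them, relying on the ALiBi bias applied in
// the attention softmax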
  6474. struct ggml_cgraph * build_baichuan() {
  6475. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6476. const int64_t n_embd_head = hparams.n_embd_head_v;
  6477. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6478. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6479. struct ggml_tensor * cur;
  6480. struct ggml_tensor * inpL;
  6481. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  6482. // inp_pos - contains the positions
  6483. struct ggml_tensor * inp_pos = model.type == MODEL_7B ? build_inp_pos() : nullptr;
  6484. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  6485. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  6486. for (int il = 0; il < n_layer; ++il) {
  6487. struct ggml_tensor * inpSA = inpL;
  6488. cur = llm_build_norm(ctx0, inpL, hparams,
  6489. model.layers[il].attn_norm, NULL,
  6490. LLM_NORM_RMS, cb, il);
  6491. cb(cur, "attn_norm", il);
  6492. // self-attention
  6493. {
  6494. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  6495. cb(Qcur, "Qcur", il);
  6496. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  6497. cb(Kcur, "Kcur", il);
  6498. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  6499. cb(Vcur, "Vcur", il);
  6500. switch (model.type) {
  6501. case MODEL_7B:
  6502. Qcur = ggml_rope_ext(
  6503. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  6504. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6505. ext_factor, attn_factor, beta_fast, beta_slow
  6506. );
  6507. Kcur = ggml_rope_ext(
  6508. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  6509. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6510. ext_factor, attn_factor, beta_fast, beta_slow
  6511. );
  6512. break;
  6513. case MODEL_13B:
  6514. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
  6515. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
  6516. break;
  6517. default:
  6518. GGML_ASSERT(false);
  6519. }
  6520. cb(Qcur, "Qcur", il);
  6521. cb(Kcur, "Kcur", il);
  6522. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  6523. model.layers[il].wo, NULL,
  6524. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  6525. }
  6526. if (il == n_layer - 1) {
  6527. // skip computing output for unused tokens
  6528. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  6529. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6530. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6531. }
  6532. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6533. cb(ffn_inp, "ffn_inp", il);
  6534. // feed-forward network
  6535. {
  6536. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  6537. model.layers[il].ffn_norm, NULL,
  6538. LLM_NORM_RMS, cb, il);
  6539. cb(cur, "ffn_norm", il);
  6540. cur = llm_build_ffn(ctx0, cur,
  6541. model.layers[il].ffn_up, NULL,
  6542. model.layers[il].ffn_gate, NULL,
  6543. model.layers[il].ffn_down, NULL,
  6544. NULL,
  6545. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  6546. cb(cur, "ffn_out", il);
  6547. }
  6548. cur = ggml_add(ctx0, cur, ffn_inp);
  6549. cb(cur, "l_out", il);
  6550. // input for next layer
  6551. inpL = cur;
  6552. }
  6553. cur = inpL;
  6554. cur = llm_build_norm(ctx0, cur, hparams,
  6555. model.output_norm, NULL,
  6556. LLM_NORM_RMS, cb, -1);
  6557. cb(cur, "result_norm", -1);
  6558. // lm_head
  6559. cur = ggml_mul_mat(ctx0, model.output, cur);
  6560. cb(cur, "result_output", -1);
  6561. ggml_build_forward_expand(gf, cur);
  6562. return gf;
  6563. }
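// LLaMA-style graph without attention biases or MoE: RMSNorm, RoPE-ed Q/K, SiLU-gated FFN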
  6564. struct ggml_cgraph * build_xverse() {
  6565. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6566. const int64_t n_embd_head = hparams.n_embd_head_v;
  6567. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6568. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6569. struct ggml_tensor * cur;
  6570. struct ggml_tensor * inpL;
  6571. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  6572. // inp_pos - contains the positions
  6573. struct ggml_tensor * inp_pos = build_inp_pos();
  6574. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  6575. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  6576. for (int il = 0; il < n_layer; ++il) {
  6577. struct ggml_tensor * inpSA = inpL;
  6578. cur = llm_build_norm(ctx0, inpL, hparams,
  6579. model.layers[il].attn_norm, NULL,
  6580. LLM_NORM_RMS, cb, il);
  6581. cb(cur, "attn_norm", il);
  6582. // self-attention
  6583. {
  6584. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  6585. cb(Qcur, "Qcur", il);
  6586. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  6587. cb(Kcur, "Kcur", il);
  6588. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  6589. cb(Vcur, "Vcur", il);
  6590. Qcur = ggml_rope_ext(
  6591. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  6592. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6593. ext_factor, attn_factor, beta_fast, beta_slow
  6594. );
  6595. cb(Qcur, "Qcur", il);
  6596. Kcur = ggml_rope_ext(
  6597. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  6598. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6599. ext_factor, attn_factor, beta_fast, beta_slow
  6600. );
  6601. cb(Kcur, "Kcur", il);
  6602. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  6603. model.layers[il].wo, NULL,
  6604. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  6605. }
  6606. if (il == n_layer - 1) {
  6607. // skip computing output for unused tokens
  6608. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  6609. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6610. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6611. }
  6612. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6613. cb(ffn_inp, "ffn_inp", il);
  6614. // feed-forward network
  6615. {
  6616. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  6617. model.layers[il].ffn_norm, NULL,
  6618. LLM_NORM_RMS, cb, il);
  6619. cb(cur, "ffn_norm", il);
  6620. cur = llm_build_ffn(ctx0, cur,
  6621. model.layers[il].ffn_up, NULL,
  6622. model.layers[il].ffn_gate, NULL,
  6623. model.layers[il].ffn_down, NULL,
  6624. NULL,
  6625. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  6626. cb(cur, "ffn_out", il);
  6627. }
  6628. cur = ggml_add(ctx0, cur, ffn_inp);
  6629. cb(cur, "l_out", il);
  6630. // input for next layer
  6631. inpL = cur;
  6632. }
  6633. cur = inpL;
  6634. cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM_RMS, cb, -1);
  6635. cb(cur, "result_norm", -1);
  6636. // lm_head
  6637. cur = ggml_mul_mat(ctx0, model.output, cur);
  6638. cb(cur, "result_output", -1);
  6639. ggml_build_forward_expand(gf, cur);
  6640. return gf;
  6641. }
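// Falcon graph: fused QKV projection, one or two input LayerNorms (attn_norm_2 is only
// present for Falcon-40B), NeoX-style RoPE, and a parallel attention + MLP block -- the FFN
// reads the attn_norm output and both results are added to the layer input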
  6642. struct ggml_cgraph * build_falcon() {
  6643. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6644. const int64_t n_embd_head = hparams.n_embd_head_v;
  6645. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  6646. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6647. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6648. struct ggml_tensor * cur;
  6649. struct ggml_tensor * inpL;
  6650. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  6651. // inp_pos - contains the positions
  6652. struct ggml_tensor * inp_pos = build_inp_pos();
  6653. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  6654. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  6655. for (int il = 0; il < n_layer; ++il) {
  6656. struct ggml_tensor * attn_norm;
  6657. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  6658. model.layers[il].attn_norm,
  6659. model.layers[il].attn_norm_b,
  6660. LLM_NORM, cb, il);
  6661. cb(attn_norm, "attn_norm", il);
  6662. // self-attention
  6663. {
  6664. if (model.layers[il].attn_norm_2) {
  6665. // Falcon-40B
  6666. cur = llm_build_norm(ctx0, inpL, hparams,
  6667. model.layers[il].attn_norm_2,
  6668. model.layers[il].attn_norm_2_b,
  6669. LLM_NORM, cb, il);
  6670. cb(cur, "attn_norm_2", il);
  6671. } else {
  6672. cur = attn_norm;
  6673. }
  6674. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  6675. cb(cur, "wqkv", il);
  6676. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  6677. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  6678. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  6679. cb(Qcur, "Qcur", il);
  6680. cb(Kcur, "Kcur", il);
  6681. cb(Vcur, "Vcur", il);
  6682. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6683. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  6684. // using mode = 2 for neox mode
  6685. Qcur = ggml_rope_ext(
  6686. ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
  6687. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  6688. );
  6689. cb(Qcur, "Qcur", il);
  6690. Kcur = ggml_rope_ext(
  6691. ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
  6692. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  6693. );
  6694. cb(Kcur, "Kcur", il);
  6695. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  6696. model.layers[il].wo, NULL,
  6697. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  6698. }
  6699. if (il == n_layer - 1) {
  6700. // skip computing output for unused tokens
  6701. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  6702. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6703. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  6704. attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids);
  6705. }
  6706. struct ggml_tensor * ffn_inp = cur;
  6707. // feed forward
  6708. {
  6709. cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result
  6710. model.layers[il].ffn_up, NULL,
  6711. NULL, NULL,
  6712. model.layers[il].ffn_down, NULL,
  6713. NULL,
  6714. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  6715. cb(cur, "ffn_out", il);
  6716. }
  6717. cur = ggml_add(ctx0, cur, ffn_inp);
  6718. cb(cur, "l_out", il);
  6719. cur = ggml_add(ctx0, cur, inpL);
  6720. cb(cur, "l_out", il);
  6721. // input for next layer
  6722. inpL = cur;
  6723. }
  6724. cur = inpL;
  6725. // norm
  6726. cur = llm_build_norm(ctx0, cur, hparams,
  6727. model.output_norm,
  6728. model.output_norm_b,
  6729. LLM_NORM, cb, -1);
  6730. cb(cur, "result_norm", -1);
  6731. cur = ggml_mul_mat(ctx0, model.output, cur);
  6732. cb(cur, "result_output", -1);
  6733. ggml_build_forward_expand(gf, cur);
  6734. return gf;
  6735. }
  6736. struct ggml_cgraph * build_grok() {
  6737. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6738. // mutable variable, needed during the last layer of the computation to skip unused tokens
  6739. int32_t n_tokens = this->n_tokens;
  6740. const int64_t n_embd_head = hparams.n_embd_head_v;
  6741. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6742. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6743. struct ggml_tensor * cur;
  6744. struct ggml_tensor * inpL;
  6745. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  6746. // multiply by embedding_multiplier_scale of 78.38367176906169
  6747. inpL = ggml_scale(ctx0, inpL, 78.38367176906169f);
  6748. // inp_pos - contains the positions
  6749. struct ggml_tensor * inp_pos = build_inp_pos();
  6750. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  6751. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  6752. for (int il = 0; il < n_layer; ++il) {
  6753. struct ggml_tensor * inpSA = inpL;
  6754. // norm
  6755. cur = llm_build_norm(ctx0, inpL, hparams,
  6756. model.layers[il].attn_norm, NULL,
  6757. LLM_NORM_RMS, cb, il);
  6758. cb(cur, "attn_norm", il);
  6759. // self-attention
  6760. {
  6761. // compute Q and K and RoPE them
  6762. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  6763. cb(Qcur, "Qcur", il);
  6764. if (model.layers[il].bq) {
  6765. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  6766. cb(Qcur, "Qcur", il);
  6767. }
  6768. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  6769. cb(Kcur, "Kcur", il);
  6770. if (model.layers[il].bk) {
  6771. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  6772. cb(Kcur, "Kcur", il);
  6773. }
  6774. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  6775. cb(Vcur, "Vcur", il);
  6776. if (model.layers[il].bv) {
  6777. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  6778. cb(Vcur, "Vcur", il);
  6779. }
  6780. Qcur = ggml_rope_ext(
  6781. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  6782. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6783. ext_factor, attn_factor, beta_fast, beta_slow
  6784. );
  6785. cb(Qcur, "Qcur", il);
  6786. Kcur = ggml_rope_ext(
  6787. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  6788. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6789. ext_factor, attn_factor, beta_fast, beta_slow
  6790. );
  6791. cb(Kcur, "Kcur", il);
  6792. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  6793. model.layers[il].wo, model.layers[il].bo,
  6794. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
  6795. }
  6796. if (il == n_layer - 1) {
  6797. // skip computing output for unused tokens
  6798. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  6799. n_tokens = n_outputs;
  6800. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6801. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6802. }
  6803. // Grok
  6804. // if attn_out_norm is present then apply it before adding the input
  6805. if (model.layers[il].attn_out_norm) {
  6806. cur = llm_build_norm(ctx0, cur, hparams,
  6807. model.layers[il].attn_out_norm, NULL,
  6808. LLM_NORM_RMS, cb, il);
  6809. cb(cur, "attn_out_norm", il);
  6810. }
  6811. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6812. cb(ffn_inp, "ffn_inp", il);
  6813. // feed-forward network
  6814. // MoE branch
  6815. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  6816. model.layers[il].ffn_norm, NULL,
  6817. LLM_NORM_RMS, cb, il);
  6818. cb(cur, "ffn_norm", il);
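// routed MoE FFN: ffn_gate_inp scores the experts, the top n_expert_used of n_expert experts are
// evaluated with their up/gate/down tensors (GELU), and the outputs are mixed by the routing
// weights (the trailing bool/float arguments control renormalization/scaling of those weights)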
  6819. cur = llm_build_moe_ffn(ctx0, cur,
  6820. model.layers[il].ffn_gate_inp,
  6821. model.layers[il].ffn_up_exps,
  6822. model.layers[il].ffn_gate_exps,
  6823. model.layers[il].ffn_down_exps,
  6824. n_expert, n_expert_used,
  6825. LLM_FFN_GELU, true,
  6826. false, 0.0,
  6827. cb, il);
  6828. cb(cur, "ffn_moe_out", il);
  6829. // Grok
  6830. // if layer_out_norm is present then apply it before adding the input
  6831. // Idea: maybe ffn_out_norm is a better name
  6832. if (model.layers[il].layer_out_norm) {
  6833. cur = llm_build_norm(ctx0, cur, hparams,
  6834. model.layers[il].layer_out_norm, NULL,
  6835. LLM_NORM_RMS, cb, il);
  6836. cb(cur, "layer_out_norm", il);
  6837. }
  6838. cur = ggml_add(ctx0, cur, ffn_inp);
  6839. cb(cur, "ffn_out", il);
  6840. ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
  6841. if (layer_dir != nullptr) {
  6842. cur = ggml_add(ctx0, cur, layer_dir);
  6843. }
  6844. cb(cur, "l_out", il);
  6845. // input for next layer
  6846. inpL = cur;
  6847. }
  6848. cur = inpL;
  6849. cur = llm_build_norm(ctx0, cur, hparams,
  6850. model.output_norm, NULL,
  6851. LLM_NORM_RMS, cb, -1);
  6852. cb(cur, "result_norm", -1);
  6853. // lm_head
  6854. cur = ggml_mul_mat(ctx0, model.output, cur);
  6855. // Grok
  6856. // multiply logits by output_multiplier_scale of 0.5773502691896257
  6857. cur = ggml_scale(ctx0, cur, 0.5773502691896257f);
  6858. cb(cur, "result_output", -1);
  6859. ggml_build_forward_expand(gf, cur);
  6860. return gf;
  6861. }
  6862. struct ggml_cgraph * build_dbrx() {
  6863. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6864. // mutable variable, needed during the last layer of the computation to skip unused tokens
  6865. int32_t n_tokens = this->n_tokens;
  6866. const int64_t n_embd_head = hparams.n_embd_head_v;
  6867. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  6868. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6869. GGML_ASSERT(n_embd_head == hparams.n_rot);
  6870. struct ggml_tensor * cur;
  6871. struct ggml_tensor * inpL;
  6872. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  6873. // inp_pos - contains the positions
  6874. struct ggml_tensor * inp_pos = build_inp_pos();
  6875. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  6876. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  6877. for (int il = 0; il < n_layer; ++il) {
  6878. struct ggml_tensor * inpSA = inpL;
  6879. // norm
  6880. cur = llm_build_norm(ctx0, inpL, hparams,
  6881. model.layers[il].attn_norm, NULL,
  6882. LLM_NORM, cb, il);
  6883. cb(cur, "attn_norm", il);
  6884. // self-attention
  6885. {
  6886. struct ggml_tensor * Qcur = nullptr;
  6887. struct ggml_tensor * Kcur = nullptr;
  6888. struct ggml_tensor * Vcur = nullptr;
  6889. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  6890. cb(cur, "wqkv", il);
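// DBRX clamps the fused QKV activations to [-f_clamp_kqv, +f_clamp_kqv] before splitting them
// into Q/K/V (build_mpt below applies the same clamp when f_clamp_kqv > 0)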
  6891. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  6892. cb(cur, "wqkv_clamped", il);
  6893. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  6894. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  6895. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  6896. cb(Qcur, "Qcur", il);
  6897. cb(Kcur, "Kcur", il);
  6898. cb(Vcur, "Vcur", il);
  6899. Qcur = ggml_rope_ext(
  6900. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  6901. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6902. ext_factor, attn_factor, beta_fast, beta_slow
  6903. );
  6904. cb(Qcur, "Qcur", il);
  6905. Kcur = ggml_rope_ext(
  6906. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  6907. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  6908. ext_factor, attn_factor, beta_fast, beta_slow
  6909. );
  6910. cb(Kcur, "Kcur", il);
  6911. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  6912. model.layers[il].wo, NULL,
  6913. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  6914. }
  6915. if (il == n_layer - 1) {
  6916. // skip computing output for unused tokens
  6917. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  6918. n_tokens = n_outputs;
  6919. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  6920. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  6921. }
  6922. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  6923. cb(ffn_inp, "ffn_inp", il);
  6924. // feed-forward network
  6925. // MoE branch
  6926. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  6927. model.layers[il].attn_out_norm, NULL,
  6928. LLM_NORM, cb, il);
  6929. cb(cur, "attn_out_norm", il);
  6930. cur = llm_build_moe_ffn(ctx0, cur,
  6931. model.layers[il].ffn_gate_inp,
  6932. model.layers[il].ffn_up_exps,
  6933. model.layers[il].ffn_gate_exps,
  6934. model.layers[il].ffn_down_exps,
  6935. n_expert, n_expert_used,
  6936. LLM_FFN_SILU, true,
  6937. false, 0.0,
  6938. cb, il);
  6939. cb(cur, "ffn_moe_out", il);
  6940. cur = ggml_add(ctx0, cur, ffn_inp);
  6941. cb(cur, "ffn_out", il);
  6942. ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
  6943. if (layer_dir != nullptr) {
  6944. cur = ggml_add(ctx0, cur, layer_dir);
  6945. }
  6946. cb(cur, "l_out", il);
  6947. // input for next layer
  6948. inpL = cur;
  6949. }
  6950. cur = inpL;
  6951. cur = llm_build_norm(ctx0, cur, hparams,
  6952. model.output_norm, NULL,
  6953. LLM_NORM, cb, -1);
  6954. cb(cur, "result_norm", -1);
  6955. // lm_head
  6956. cur = ggml_mul_mat(ctx0, model.output, cur);
  6957. cb(cur, "result_output", -1);
  6958. ggml_build_forward_expand(gf, cur);
  6959. return gf;
  6960. }
  6961. struct ggml_cgraph * build_starcoder() {
  6962. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  6963. const int64_t n_embd_head = hparams.n_embd_head_v;
  6964. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  6965. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  6966. struct ggml_tensor * cur;
  6967. struct ggml_tensor * inpL;
  6968. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  6969. // inp_pos - contains the positions
  6970. struct ggml_tensor * inp_pos = build_inp_pos();
  6971. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  6972. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  6973. struct ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  6974. cb(pos, "pos_embd", -1);
  6975. inpL = ggml_add(ctx0, inpL, pos);
  6976. cb(inpL, "inpL", -1);
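// StarCoder adds learned absolute position embeddings to the token embeddings;
// no RoPE is applied in the attention below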
  6977. for (int il = 0; il < n_layer; ++il) {
  6978. cur = llm_build_norm(ctx0, inpL, hparams,
  6979. model.layers[il].attn_norm,
  6980. model.layers[il].attn_norm_b,
  6981. LLM_NORM, cb, il);
  6982. cb(cur, "attn_norm", il);
  6983. // self-attention
  6984. {
  6985. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  6986. cb(cur, "wqkv", il);
  6987. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  6988. cb(cur, "bqkv", il);
  6989. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  6990. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  6991. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  6992. cb(Qcur, "Qcur", il);
  6993. cb(Kcur, "Kcur", il);
  6994. cb(Vcur, "Vcur", il);
  6995. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  6996. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  6997. model.layers[il].wo, model.layers[il].bo,
  6998. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  6999. }
  7000. if (il == n_layer - 1) {
  7001. // skip computing output for unused tokens
  7002. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7003. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7004. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7005. }
  7006. // add the input
  7007. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  7008. cb(ffn_inp, "ffn_inp", il);
  7009. // FF
  7010. {
  7011. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  7012. model.layers[il].ffn_norm,
  7013. model.layers[il].ffn_norm_b,
  7014. LLM_NORM, cb, il);
  7015. cb(cur, "ffn_norm", il);
  7016. cur = llm_build_ffn(ctx0, cur,
  7017. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  7018. NULL, NULL,
  7019. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  7020. NULL,
  7021. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  7022. cb(cur, "ffn_out", il);
  7023. }
  7024. inpL = ggml_add(ctx0, cur, ffn_inp);
  7025. cb(inpL, "l_out", il);
  7026. }
  7027. cur = llm_build_norm(ctx0, inpL, hparams,
  7028. model.output_norm,
  7029. model.output_norm_b,
  7030. LLM_NORM, cb, -1);
  7031. cb(cur, "result_norm", -1);
  7032. cur = ggml_mul_mat(ctx0, model.output, cur);
  7033. cb(cur, "result_output", -1);
  7034. ggml_build_forward_expand(gf, cur);
  7035. return gf;
  7036. }
  7037. struct ggml_cgraph * build_refact() {
  7038. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7039. const int64_t n_embd_head = hparams.n_embd_head_v;
  7040. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7041. struct ggml_tensor * cur;
  7042. struct ggml_tensor * inpL;
  7043. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7044. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7045. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
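// note: no inp_pos / RoPE is built here; positional information comes from the bias applied
// inside the attention (ALiBi-style, presumably via hparams.f_max_alibi_bias in llm_build_kv)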
  7046. for (int il = 0; il < n_layer; ++il) {
  7047. struct ggml_tensor * inpSA = inpL;
  7048. cur = llm_build_norm(ctx0, inpL, hparams,
  7049. model.layers[il].attn_norm, NULL,
  7050. LLM_NORM_RMS, cb, il);
  7051. cb(cur, "attn_norm", il);
  7052. // self-attention
  7053. {
  7054. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  7055. cb(Qcur, "Qcur", il);
  7056. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  7057. cb(Kcur, "Kcur", il);
  7058. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  7059. cb(Vcur, "Vcur", il);
  7060. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7061. cb(Kcur, "Kcur", il);
  7062. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7063. cb(Qcur, "Qcur", il);
  7064. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7065. model.layers[il].wo, NULL,
  7066. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  7067. }
  7068. if (il == n_layer - 1) {
  7069. // skip computing output for unused tokens
  7070. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7071. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7072. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7073. }
  7074. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7075. cb(ffn_inp, "ffn_inp", il);
  7076. // feed-forward network
  7077. {
  7078. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  7079. model.layers[il].ffn_norm, NULL,
  7080. LLM_NORM_RMS, cb, il);
  7081. cb(cur, "ffn_norm", il);
  7082. cur = llm_build_ffn(ctx0, cur,
  7083. model.layers[il].ffn_up, NULL,
  7084. model.layers[il].ffn_gate, NULL,
  7085. model.layers[il].ffn_down, NULL,
  7086. NULL,
  7087. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  7088. cb(cur, "ffn_out", il);
  7089. }
  7090. cur = ggml_add(ctx0, cur, ffn_inp);
  7091. cb(cur, "l_out", il);
  7092. // input for next layer
  7093. inpL = cur;
  7094. }
  7095. cur = inpL;
  7096. cur = llm_build_norm(ctx0, cur, hparams,
  7097. model.output_norm, NULL,
  7098. LLM_NORM_RMS, cb, -1);
  7099. cb(cur, "result_norm", -1);
  7100. // lm_head
  7101. cur = ggml_mul_mat(ctx0, model.output, cur);
  7102. cb(cur, "result_output", -1);
  7103. ggml_build_forward_expand(gf, cur);
  7104. return gf;
  7105. }
  7106. struct ggml_cgraph * build_bert() {
  7107. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7108. const int64_t n_embd_head = hparams.n_embd_head_v;
  7109. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  7110. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7111. struct ggml_tensor * cur;
  7112. struct ggml_tensor * inpL;
  7113. struct ggml_tensor * inp_pos = nullptr;
  7114. if (model.arch != LLM_ARCH_JINA_BERT_V2) {
  7115. inp_pos = build_inp_pos();
  7116. }
  7117. struct ggml_tensor * inp_mean = build_inp_mean();
  7118. struct ggml_tensor * inp_cls = build_inp_cls();
  7119. // construct input embeddings (token, type, position)
  7120. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7121. // token types are hardcoded to zero ("Sentence A")
  7122. struct ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
  7123. inpL = ggml_add(ctx0, inpL, type_row0);
  7124. if (model.arch == LLM_ARCH_BERT) {
  7125. inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL);
  7126. }
  7127. cb(inpL, "inp_embd", -1);
  7128. // embed layer norm
  7129. inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, LLM_NORM, cb, -1);
  7130. cb(inpL, "inp_norm", -1);
  7131. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7132. struct ggml_tensor * KQ_mask = build_inp_KQ_mask(false);
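// the `false` argument requests a non-causal mask: the encoder attends bidirectionally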
  7133. // iterate layers
  7134. for (int il = 0; il < n_layer; ++il) {
  7135. struct ggml_tensor * cur = inpL;
  7136. struct ggml_tensor * Qcur;
  7137. struct ggml_tensor * Kcur;
  7138. struct ggml_tensor * Vcur;
  7139. // self-attention
  7140. if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) {
  7141. Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), model.layers[il].bq);
  7142. cb(Qcur, "Qcur", il);
  7143. if (model.layers[il].attn_q_norm) {
  7144. Qcur = llm_build_norm(ctx0, Qcur, hparams,
  7145. model.layers[il].attn_q_norm,
  7146. model.layers[il].attn_q_norm_b,
  7147. LLM_NORM, cb, il);
  7148. }
  7149. Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), model.layers[il].bk);
  7150. cb(Kcur, "Kcur", il);
  7151. if (model.layers[il].attn_k_norm) {
  7152. Kcur = llm_build_norm(ctx0, Kcur, hparams,
  7153. model.layers[il].attn_k_norm,
  7154. model.layers[il].attn_k_norm_b,
  7155. LLM_NORM, cb, il);
  7156. }
  7157. Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, cur), model.layers[il].bv);
  7158. cb(Vcur, "Vcur", il);
  7159. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7160. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7161. } else {
  7162. // compute Q and K and RoPE them
  7163. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  7164. cb(cur, "wqkv", il);
  7165. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  7166. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  7167. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  7168. cb(Qcur, "Qcur", il);
  7169. cb(Kcur, "Kcur", il);
  7170. cb(Vcur, "Vcur", il);
  7171. Qcur = ggml_rope_ext(
  7172. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  7173. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7174. ext_factor, attn_factor, beta_fast, beta_slow
  7175. );
  7176. cb(Qcur, "Qcur", il);
  7177. Kcur = ggml_rope_ext(
  7178. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  7179. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7180. ext_factor, attn_factor, beta_fast, beta_slow
  7181. );
  7182. cb(Kcur, "Kcur", il);
  7183. }
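// attention is built explicitly below (permute heads, compute the QK scores, masked softmax with
// an optional ALiBi bias, weight V) instead of calling llm_build_kv: this graph never touches the KV cache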
  7184. struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
  7185. struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));
  7186. struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
  7187. cb(kq, "kq", il);
  7188. kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias);
  7189. cb(kq, "kq_soft_max_ext", il);
  7190. struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens)));
  7191. cb(v, "v", il);
  7192. struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_tokens, n_embd_head, n_head_kv), kq);
  7193. cb(kqv, "kqv", il);
  7194. struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
  7195. cb(kqv_merged, "kqv_merged", il);
  7196. cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
  7197. cb(cur, "kqv_merged_cont", il);
  7198. ggml_build_forward_expand(gf, cur);
cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
if (model.layers[il].bo) {
cb(cur, "kqv_wo", il);
cur = ggml_add(ctx0, cur, model.layers[il].bo);
}
  7206. cb(cur, "kqv_out", il);
  7207. if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
  7208. // skip computing output for unused tokens
  7209. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7210. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7211. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7212. }
  7213. // re-add the layer input
  7214. cur = ggml_add(ctx0, cur, inpL);
  7215. // attention layer norm
  7216. cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);
  7217. if (model.layers[il].attn_norm_2 != nullptr) {
  7218. cur = ggml_add(ctx0, cur, inpL); // re-add the layer input
  7219. cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, cb, il);
  7220. }
  7221. struct ggml_tensor * ffn_inp = cur;
  7222. cb(ffn_inp, "ffn_inp", il);
  7223. // feed-forward network
  7224. if (model.arch == LLM_ARCH_BERT) {
  7225. cur = llm_build_ffn(ctx0, cur,
  7226. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  7227. NULL, NULL,
  7228. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  7229. NULL,
  7230. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  7231. } else if (model.arch == LLM_ARCH_JINA_BERT_V2) {
  7232. cur = llm_build_ffn(ctx0, cur,
  7233. model.layers[il].ffn_up, NULL,
  7234. model.layers[il].ffn_gate, NULL,
  7235. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  7236. NULL,
  7237. LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
  7238. } else {
  7239. cur = llm_build_ffn(ctx0, cur,
  7240. model.layers[il].ffn_up, NULL,
  7241. model.layers[il].ffn_gate, NULL,
  7242. model.layers[il].ffn_down, NULL,
  7243. NULL,
  7244. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  7245. }
  7246. cb(cur, "ffn_out", il);
// residual around the FFN: add back the pre-FFN input (ffn_inp)
  7248. cur = ggml_add(ctx0, cur, ffn_inp);
  7249. // output layer norm
  7250. cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, cb, il);
  7251. // input for next layer
  7252. inpL = cur;
  7253. }
  7254. // final output
  7255. cur = inpL;
  7256. cb(cur, "result_embd", -1);
  7257. // pooling layer
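// MEAN: multiply the transposed embeddings by the inp_mean averaging matrix (per-sequence weights);
// CLS: gather the embedding rows indexed by inp_cls (one index per sequence)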
  7258. switch (pooling_type) {
  7259. case LLAMA_POOLING_TYPE_NONE:
  7260. {
  7261. // nop
  7262. } break;
  7263. case LLAMA_POOLING_TYPE_MEAN:
  7264. {
  7265. cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, cur)), inp_mean);
  7266. cb(cur, "result_embd_pooled", -1);
  7267. } break;
  7268. case LLAMA_POOLING_TYPE_CLS:
  7269. {
  7270. cur = ggml_get_rows(ctx0, cur, inp_cls);
  7271. cb(cur, "result_embd_pooled", -1);
  7272. } break;
  7273. case LLAMA_POOLING_TYPE_UNSPECIFIED:
  7274. {
  7275. GGML_ASSERT(false && "Invalid pooling type");
  7276. } break;
  7277. }
  7278. ggml_build_forward_expand(gf, cur);
  7279. return gf;
  7280. }
  7281. struct ggml_cgraph * build_bloom() {
  7282. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7283. const int64_t n_embd_head = hparams.n_embd_head_v;
  7284. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  7285. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7286. struct ggml_tensor * cur;
  7287. struct ggml_tensor * inpL;
  7288. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7289. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7290. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  7291. inpL = llm_build_norm(ctx0, inpL, hparams,
  7292. model.tok_norm,
  7293. model.tok_norm_b,
  7294. LLM_NORM, cb, -1);
  7295. cb(inpL, "inp_norm", -1);
  7296. for (int il = 0; il < n_layer; ++il) {
  7297. cur = llm_build_norm(ctx0, inpL, hparams,
  7298. model.layers[il].attn_norm,
  7299. model.layers[il].attn_norm_b,
  7300. LLM_NORM, cb, il);
  7301. cb(cur, "attn_norm", il);
  7302. // self-attention
  7303. {
  7304. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  7305. cb(cur, "wqkv", il);
  7306. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  7307. cb(cur, "bqkv", il);
  7308. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  7309. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  7310. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  7311. cb(Qcur, "Qcur", il);
  7312. cb(Kcur, "Kcur", il);
  7313. cb(Vcur, "Vcur", il);
  7314. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7315. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7316. model.layers[il].wo, model.layers[il].bo,
  7317. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  7318. }
  7319. if (il == n_layer - 1) {
  7320. // skip computing output for unused tokens
  7321. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7322. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7323. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7324. }
  7325. // Add the input
  7326. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  7327. cb(ffn_inp, "ffn_inp", il);
  7328. // FF
  7329. {
  7330. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  7331. model.layers[il].ffn_norm,
  7332. model.layers[il].ffn_norm_b,
  7333. LLM_NORM, cb, il);
  7334. cb(cur, "ffn_norm", il);
  7335. cur = llm_build_ffn(ctx0, cur,
  7336. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  7337. NULL, NULL,
  7338. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  7339. NULL,
  7340. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  7341. cb(cur, "ffn_out", il);
  7342. }
  7343. inpL = ggml_add(ctx0, cur, ffn_inp);
  7344. cb(inpL, "l_out", il);
  7345. }
  7346. cur = llm_build_norm(ctx0, inpL, hparams,
  7347. model.output_norm,
  7348. model.output_norm_b,
  7349. LLM_NORM, cb, -1);
  7350. cb(cur, "result_norm", -1);
  7351. cur = ggml_mul_mat(ctx0, model.output, cur);
  7352. cb(cur, "result_output", -1);
  7353. ggml_build_forward_expand(gf, cur);
  7354. return gf;
  7355. }
  7356. struct ggml_cgraph * build_mpt() {
  7357. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7358. const int64_t n_embd_head = hparams.n_embd_head_v;
  7359. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  7360. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7361. struct ggml_tensor * cur;
  7362. struct ggml_tensor * pos;
  7363. struct ggml_tensor * inpL;
  7364. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7365. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7366. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  7367. if (model.pos_embd) {
  7368. // inp_pos - contains the positions
  7369. struct ggml_tensor * inp_pos = build_inp_pos();
  7370. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  7371. cb(pos, "pos_embd", -1);
  7372. inpL = ggml_add(ctx0, inpL, pos);
  7373. cb(inpL, "inpL", -1);
  7374. }
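// pos_embd is optional: only MPT variants trained with learned position embeddings carry it,
// the usual ALiBi-based checkpoints skip this block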
  7375. for (int il = 0; il < n_layer; ++il) {
  7376. struct ggml_tensor * attn_norm;
  7377. attn_norm = llm_build_norm(ctx0, inpL, hparams,
  7378. model.layers[il].attn_norm,
  7379. model.layers[il].attn_norm_b,
  7380. LLM_NORM, cb, il);
  7381. cb(attn_norm, "attn_norm", il);
  7382. // self-attention
  7383. {
  7384. cur = attn_norm;
  7385. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  7386. cb(cur, "wqkv", il);
if (model.layers[il].bqkv) {
  7388. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  7389. cb(cur, "bqkv", il);
  7390. }
  7391. if (hparams.f_clamp_kqv > 0.0f) {
  7392. cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  7393. cb(cur, "wqkv_clamped", il);
  7394. }
  7395. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  7396. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  7397. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  7398. cb(Qcur, "Qcur", il);
  7399. cb(Kcur, "Kcur", il);
  7400. cb(Vcur, "Vcur", il);
  7401. // Q/K Layernorm
  7402. if (model.layers[il].attn_q_norm) {
  7403. Qcur = llm_build_norm(ctx0, Qcur, hparams,
  7404. model.layers[il].attn_q_norm,
  7405. model.layers[il].attn_q_norm_b,
  7406. LLM_NORM, cb, il);
  7407. cb(Qcur, "Qcur", il);
  7408. Kcur = llm_build_norm(ctx0, Kcur, hparams,
  7409. model.layers[il].attn_k_norm,
  7410. model.layers[il].attn_k_norm_b,
  7411. LLM_NORM, cb, il);
  7412. cb(Kcur, "Kcur", il);
  7413. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7414. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7415. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7416. model.layers[il].wo, model.layers[il].bo,
  7417. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  7418. } else {
  7419. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7420. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7421. model.layers[il].wo, model.layers[il].bo,
  7422. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  7423. }
  7424. }
  7425. if (il == n_layer - 1) {
  7426. // skip computing output for unused tokens
  7427. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7428. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7429. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7430. }
  7431. // Add the input
  7432. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  7433. cb(ffn_inp, "ffn_inp", il);
  7434. // feed forward
  7435. {
  7436. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  7437. model.layers[il].ffn_norm,
  7438. model.layers[il].ffn_norm_b,
  7439. LLM_NORM, cb, il);
  7440. cb(cur, "ffn_norm", il);
  7441. cur = llm_build_ffn(ctx0, cur,
  7442. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  7443. NULL, NULL,
  7444. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  7445. model.layers[il].ffn_act,
  7446. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  7447. cb(cur, "ffn_out", il);
  7448. }
  7449. cur = ggml_add(ctx0, cur, ffn_inp);
  7450. cb(cur, "l_out", il);
  7451. // input for next layer
  7452. inpL = cur;
  7453. }
  7454. cur = inpL;
  7455. cur = llm_build_norm(ctx0, cur, hparams,
  7456. model.output_norm,
  7457. model.output_norm_b,
  7458. LLM_NORM, cb, -1);
  7459. cb(cur, "result_norm", -1);
  7460. cur = ggml_mul_mat(ctx0, model.output, cur);
  7461. cb(cur, "result_output", -1);
  7462. ggml_build_forward_expand(gf, cur);
  7463. return gf;
  7464. }
  7465. struct ggml_cgraph * build_stablelm() {
  7466. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  7467. const int64_t n_embd_head = hparams.n_embd_head_v;
  7468. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7469. struct ggml_tensor * cur;
  7470. struct ggml_tensor * inpL;
  7471. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7472. // inp_pos - contains the positions
  7473. struct ggml_tensor * inp_pos = build_inp_pos();
  7474. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7475. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  7476. for (int il = 0; il < n_layer; ++il) {
  7477. // norm
  7478. cur = llm_build_norm(ctx0, inpL, hparams,
  7479. model.layers[il].attn_norm,
  7480. model.layers[il].attn_norm_b,
  7481. LLM_NORM, cb, il);
  7482. cb(cur, "attn_norm", il);
  7483. struct ggml_tensor * inpSA = cur;
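// inpSA keeps the normed input: when a layer has no ffn_norm, the feed-forward block further
// down runs on this tensor instead (parallel residual)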
  7484. // self-attention
  7485. {
  7486. // compute Q and K and RoPE them
  7487. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  7488. cb(Qcur, "Qcur", il);
  7489. if (model.layers[il].bq) {
  7490. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7491. cb(Qcur, "Qcur", il);
  7492. }
  7493. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  7494. cb(Kcur, "Kcur", il);
  7495. if (model.layers[il].bk) {
  7496. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7497. cb(Kcur, "Kcur", il);
  7498. }
  7499. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  7500. cb(Vcur, "Vcur", il);
  7501. if (model.layers[il].bv) {
  7502. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7503. cb(Vcur, "Vcur", il);
  7504. }
  7505. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7506. cb(Qcur, "Qcur", il);
  7507. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7508. cb(Kcur, "Kcur", il);
  7509. if (model.layers[il].attn_q_norm) {
  7510. Qcur = llm_build_norm(ctx0, Qcur, hparams,
  7511. model.layers[il].attn_q_norm,
  7512. NULL,
  7513. LLM_NORM, cb, il);
  7514. cb(Qcur, "Qcur", il);
  7515. }
  7516. if (model.layers[il].attn_k_norm) {
  7517. Kcur = llm_build_norm(ctx0, Kcur, hparams,
  7518. model.layers[il].attn_k_norm,
  7519. NULL,
  7520. LLM_NORM, cb, il);
  7521. cb(Kcur, "Kcur", il);
  7522. }
  7523. Qcur = ggml_rope_ext(
  7524. ctx0, Qcur, inp_pos, nullptr,
  7525. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7526. ext_factor, attn_factor, beta_fast, beta_slow
  7527. );
  7528. cb(Qcur, "Qcur", il);
  7529. Kcur = ggml_rope_ext(
  7530. ctx0, Kcur, inp_pos, nullptr,
  7531. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7532. ext_factor, attn_factor, beta_fast, beta_slow
  7533. );
  7534. cb(Kcur, "Kcur", il);
  7535. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7536. model.layers[il].wo, NULL,
  7537. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  7538. }
  7539. if (il == n_layer - 1) {
  7540. // skip computing output for unused tokens
  7541. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7542. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7543. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7544. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7545. }
  7546. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  7547. cb(ffn_inp, "ffn_inp", il);
  7548. // feed-forward network
  7549. {
  7550. if (model.layers[il].ffn_norm) {
  7551. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  7552. model.layers[il].ffn_norm,
  7553. model.layers[il].ffn_norm_b,
  7554. LLM_NORM, cb, il);
  7555. cb(cur, "ffn_norm", il);
  7556. } else {
  7557. // parallel residual
  7558. cur = inpSA;
  7559. }
  7560. cur = llm_build_ffn(ctx0, cur,
  7561. model.layers[il].ffn_up, NULL,
  7562. model.layers[il].ffn_gate, NULL,
  7563. model.layers[il].ffn_down, NULL,
  7564. NULL,
  7565. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  7566. cb(cur, "ffn_out", il);
  7567. }
  7568. cur = ggml_add(ctx0, cur, ffn_inp);
  7569. cb(cur, "l_out", il);
  7570. // input for next layer
  7571. inpL = cur;
  7572. }
  7573. cur = inpL;
  7574. cur = llm_build_norm(ctx0, cur, hparams,
  7575. model.output_norm,
  7576. model.output_norm_b,
  7577. LLM_NORM, cb, -1);
  7578. cb(cur, "result_norm", -1);
  7579. // lm_head
  7580. cur = ggml_mul_mat(ctx0, model.output, cur);
  7581. cb(cur, "result_output", -1);
  7582. ggml_build_forward_expand(gf, cur);
  7583. return gf;
  7584. }
  7585. struct ggml_cgraph * build_qwen() {
  7586. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7587. const int64_t n_embd_head = hparams.n_embd_head_v;
  7588. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7589. struct ggml_tensor * cur;
  7590. struct ggml_tensor * inpL;
  7591. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7592. // inp_pos - contains the positions
  7593. struct ggml_tensor * inp_pos = build_inp_pos();
  7594. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7595. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  7596. for (int il = 0; il < n_layer; ++il) {
  7597. struct ggml_tensor * inpSA = inpL;
  7598. cur = llm_build_norm(ctx0, inpL, hparams,
  7599. model.layers[il].attn_norm, NULL,
  7600. LLM_NORM_RMS, cb, il);
  7601. cb(cur, "attn_norm", il);
  7602. // self-attention
  7603. {
  7604. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  7605. cb(cur, "wqkv", il);
  7606. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  7607. cb(cur, "bqkv", il);
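// the fused QKV here is split into three equal n_embd-wide slices (offsets 0/1/2 * n_embd),
// i.e. K and V are full width rather than n_embd_gqa as in the GQA graphs above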
  7608. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  7609. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  7610. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
  7611. cb(Qcur, "Qcur", il);
  7612. cb(Kcur, "Kcur", il);
  7613. cb(Vcur, "Vcur", il);
  7614. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7615. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
// NeoX-style RoPE (mode = 2) applied to Q and K below
  7617. Qcur = ggml_rope_ext(
  7618. ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
  7619. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  7620. );
  7621. cb(Qcur, "Qcur", il);
  7622. Kcur = ggml_rope_ext(
  7623. ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
  7624. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  7625. );
  7626. cb(Kcur, "Kcur", il);
  7627. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7628. model.layers[il].wo, NULL,
  7629. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  7630. }
  7631. if (il == n_layer - 1) {
  7632. // skip computing output for unused tokens
  7633. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7634. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7635. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7636. }
  7637. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7638. cb(ffn_inp, "ffn_inp", il);
// feed-forward network
  7640. {
  7641. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  7642. model.layers[il].ffn_norm, NULL,
  7643. LLM_NORM_RMS, cb, il);
  7644. cb(cur, "ffn_norm", il);
  7645. cur = llm_build_ffn(ctx0, cur,
  7646. model.layers[il].ffn_up, NULL,
  7647. model.layers[il].ffn_gate, NULL,
  7648. model.layers[il].ffn_down, NULL,
  7649. NULL,
  7650. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  7651. cb(cur, "ffn_out", il);
  7652. }
  7653. cur = ggml_add(ctx0, cur, ffn_inp);
  7654. cb(cur, "l_out", il);
  7655. // input for next layer
  7656. inpL = cur;
  7657. }
  7658. cur = inpL;
  7659. cur = llm_build_norm(ctx0, cur, hparams,
  7660. model.output_norm, NULL,
  7661. LLM_NORM_RMS, cb, -1);
  7662. cb(cur, "result_norm", -1);
  7663. // lm_head
  7664. cur = ggml_mul_mat(ctx0, model.output, cur);
  7665. cb(cur, "result_output", -1);
  7666. ggml_build_forward_expand(gf, cur);
  7667. return gf;
  7668. }
  7669. struct ggml_cgraph * build_qwen2() {
  7670. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7671. const int64_t n_embd_head = hparams.n_embd_head_v;
  7672. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7673. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7674. struct ggml_tensor * cur;
  7675. struct ggml_tensor * inpL;
  7676. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7677. // inp_pos - contains the positions
  7678. struct ggml_tensor * inp_pos = build_inp_pos();
  7679. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7680. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  7681. for (int il = 0; il < n_layer; ++il) {
  7682. struct ggml_tensor * inpSA = inpL;
  7683. // norm
  7684. cur = llm_build_norm(ctx0, inpL, hparams,
  7685. model.layers[il].attn_norm, NULL,
  7686. LLM_NORM_RMS, cb, il);
  7687. cb(cur, "attn_norm", il);
  7688. // self-attention
  7689. {
  7690. // compute Q and K and RoPE them
  7691. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  7692. cb(Qcur, "Qcur", il);
  7693. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7694. cb(Qcur, "Qcur", il);
  7695. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  7696. cb(Kcur, "Kcur", il);
  7697. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7698. cb(Kcur, "Kcur", il);
  7699. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  7700. cb(Vcur, "Vcur", il);
  7701. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7702. cb(Vcur, "Vcur", il);
  7703. Qcur = ggml_rope_ext(
  7704. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  7705. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7706. ext_factor, attn_factor, beta_fast, beta_slow
  7707. );
  7708. cb(Qcur, "Qcur", il);
  7709. Kcur = ggml_rope_ext(
  7710. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  7711. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7712. ext_factor, attn_factor, beta_fast, beta_slow
  7713. );
  7714. cb(Kcur, "Kcur", il);
  7715. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7716. model.layers[il].wo, model.layers[il].bo,
  7717. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  7718. }
  7719. if (il == n_layer - 1) {
  7720. // skip computing output for unused tokens
  7721. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7722. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7723. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7724. }
  7725. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7726. cb(ffn_inp, "ffn_inp", il);
  7727. // feed-forward network
  7728. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  7729. model.layers[il].ffn_norm, NULL,
  7730. LLM_NORM_RMS, cb, il);
  7731. cb(cur, "ffn_norm", il);
  7732. cur = llm_build_ffn(ctx0, cur,
  7733. model.layers[il].ffn_up, NULL,
  7734. model.layers[il].ffn_gate, NULL,
  7735. model.layers[il].ffn_down, NULL,
  7736. NULL,
  7737. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  7738. cb(cur, "ffn_out", il);
  7739. cur = ggml_add(ctx0, cur, ffn_inp);
  7740. cb(cur, "l_out", il);
  7741. // input for next layer
  7742. inpL = cur;
  7743. }
  7744. cur = inpL;
  7745. cur = llm_build_norm(ctx0, cur, hparams,
  7746. model.output_norm, NULL,
  7747. LLM_NORM_RMS, cb, -1);
  7748. cb(cur, "result_norm", -1);
  7749. // lm_head
  7750. cur = ggml_mul_mat(ctx0, model.output, cur);
  7751. cb(cur, "result_output", -1);
  7752. ggml_build_forward_expand(gf, cur);
  7753. return gf;
  7754. }
  7755. struct ggml_cgraph * build_qwen2moe() {
  7756. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7757. // mutable variable, needed during the last layer of the computation to skip unused tokens
  7758. int32_t n_tokens = this->n_tokens;
  7759. const int64_t n_embd_head = hparams.n_embd_head_v;
  7760. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7761. GGML_ASSERT(n_embd_head == hparams.n_rot);
  7762. struct ggml_tensor * cur;
  7763. struct ggml_tensor * inpL;
  7764. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7765. // inp_pos - contains the positions
  7766. struct ggml_tensor * inp_pos = build_inp_pos();
  7767. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7768. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  7769. for (int il = 0; il < n_layer; ++il) {
  7770. struct ggml_tensor * inpSA = inpL;
  7771. // norm
  7772. cur = llm_build_norm(ctx0, inpL, hparams,
  7773. model.layers[il].attn_norm, NULL,
  7774. LLM_NORM_RMS, cb, il);
  7775. cb(cur, "attn_norm", il);
// self-attention
  7777. {
  7778. // compute Q and K and RoPE them
  7779. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  7780. cb(Qcur, "Qcur", il);
  7781. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  7782. cb(Qcur, "Qcur", il);
  7783. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  7784. cb(Kcur, "Kcur", il);
  7785. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  7786. cb(Kcur, "Kcur", il);
  7787. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  7788. cb(Vcur, "Vcur", il);
  7789. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  7790. cb(Vcur, "Vcur", il);
  7791. Qcur = ggml_rope_ext(
  7792. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  7793. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7794. ext_factor, attn_factor, beta_fast, beta_slow
  7795. );
  7796. cb(Qcur, "Qcur", il);
  7797. Kcur = ggml_rope_ext(
  7798. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  7799. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  7800. ext_factor, attn_factor, beta_fast, beta_slow
  7801. );
  7802. cb(Kcur, "Kcur", il);
  7803. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7804. model.layers[il].wo, model.layers[il].bo,
  7805. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  7806. }
  7807. if (il == n_layer - 1) {
  7808. // skip computing output for unused tokens
  7809. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7810. n_tokens = n_outputs;
  7811. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7812. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  7813. }
  7814. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  7815. cb(ffn_inp, "ffn_inp", il);
  7816. // MoE branch
  7817. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  7818. model.layers[il].ffn_norm, NULL,
  7819. LLM_NORM_RMS, cb, il);
  7820. cb(cur, "ffn_norm", il);
  7821. ggml_tensor * moe_out =
  7822. llm_build_moe_ffn(ctx0, cur,
  7823. model.layers[il].ffn_gate_inp,
  7824. model.layers[il].ffn_up_exps,
  7825. model.layers[il].ffn_gate_exps,
  7826. model.layers[il].ffn_down_exps,
  7827. n_expert, n_expert_used,
  7828. LLM_FFN_SILU, false,
  7829. false, 0.0,
  7830. cb, il);
cb(moe_out, "ffn_moe_out", il);
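// on top of the routed experts, a shared-expert FFN is evaluated below; its output is gated by a
// sigmoid of ffn_gate_inp_shexp and added to the routed output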
  7832. // FFN shared expert
  7833. {
  7834. ggml_tensor * cur_gate_inp = ggml_mul_mat(ctx0, model.layers[il].ffn_gate_inp_shexp, cur);
  7835. cb(cur_gate_inp, "ffn_shexp_gate_inp", il);
// sigmoid gate: sigmoid(x) == silu(x) / x, reusing the existing silu op
  7837. ggml_tensor * cur_gate = ggml_div(ctx0, ggml_silu(ctx0, cur_gate_inp), cur_gate_inp);
  7838. cb(cur_gate, "ffn_shexp_gate", il);
  7839. ggml_tensor * cur_ffn = llm_build_ffn(ctx0, cur,
  7840. model.layers[il].ffn_up_shexp, NULL,
  7841. model.layers[il].ffn_gate_shexp, NULL,
  7842. model.layers[il].ffn_down_shexp, NULL,
  7843. NULL,
  7844. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  7845. cb(cur_ffn, "ffn_shexp", il);
  7846. ggml_tensor * ffn_shexp_out = ggml_mul(ctx0, cur_ffn, cur_gate);
  7847. cb(ffn_shexp_out, "ffn_shexp_out", il);
  7848. moe_out = ggml_add(ctx0, moe_out, ffn_shexp_out);
  7849. cb(moe_out, "ffn_out", il);
  7850. cur = moe_out;
  7851. }
  7852. cur = ggml_add(ctx0, cur, ffn_inp);
  7853. cb(cur, "l_out", il);
  7854. // input for next layer
  7855. inpL = cur;
  7856. }
  7857. cur = inpL;
  7858. cur = llm_build_norm(ctx0, cur, hparams,
  7859. model.output_norm, NULL,
  7860. LLM_NORM_RMS, cb, -1);
  7861. cb(cur, "result_norm", -1);
  7862. // lm_head
  7863. cur = ggml_mul_mat(ctx0, model.output, cur);
  7864. cb(cur, "result_output", -1);
  7865. ggml_build_forward_expand(gf, cur);
  7866. return gf;
  7867. }
  7868. struct ggml_cgraph * build_phi2() {
  7869. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7870. const int64_t n_embd_head = hparams.n_embd_head_v;
  7871. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  7872. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7873. struct ggml_tensor * cur;
  7874. struct ggml_tensor * attn_norm_output;
  7875. struct ggml_tensor * ffn_output;
  7876. struct ggml_tensor * inpL;
  7877. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7878. // inp_pos - contains the positions
  7879. struct ggml_tensor * inp_pos = build_inp_pos();
  7880. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7881. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  7882. for (int il = 0; il < n_layer; ++il) {
  7883. attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
  7884. model.layers[il].attn_norm,
  7885. model.layers[il].attn_norm_b,
  7886. LLM_NORM, cb, il);
  7887. cb(attn_norm_output, "attn_norm", il);
  7888. // self-attention
  7889. {
  7890. struct ggml_tensor * Qcur = nullptr;
  7891. struct ggml_tensor * Kcur = nullptr;
  7892. struct ggml_tensor * Vcur = nullptr;
  7893. if (model.layers[il].wqkv) {
  7894. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
  7895. cb(cur, "wqkv", il);
  7896. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  7897. cb(cur, "bqkv", il);
  7898. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  7899. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  7900. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  7901. } else {
  7902. Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq);
  7903. Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk);
  7904. Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv);
  7905. }
  7906. cb(Qcur, "Qcur", il);
  7907. cb(Kcur, "Kcur", il);
  7908. cb(Vcur, "Vcur", il);
  7909. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  7910. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  7911. Qcur = ggml_rope_ext(
  7912. ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
  7913. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  7914. );
  7915. cb(Qcur, "Qcur", il);
  7916. // with phi2, we scale the Q to avoid precision issues
  7917. // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
  7918. Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
  7919. cb(Qcur, "Qcur", il);
  7920. Kcur = ggml_rope_ext(
  7921. ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
  7922. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  7923. );
  7924. cb(Kcur, "Kcur", il);
  7925. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  7926. model.layers[il].wo, model.layers[il].bo,
  7927. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
  7928. }
  7929. if (il == n_layer - 1) {
  7930. // skip computing output for unused tokens
  7931. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  7932. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  7933. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  7934. attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids);
  7935. }
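// phi2 uses a parallel block: the FFN below reads attn_norm_output, and both the attention
// output (cur) and the FFN output are added to the layer input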
  7936. // FF
  7937. {
  7938. ffn_output = llm_build_ffn(ctx0, attn_norm_output,
  7939. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  7940. NULL, NULL,
  7941. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  7942. NULL,
  7943. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  7944. cb(ffn_output, "ffn_out", il);
  7945. }
  7946. cur = ggml_add(ctx0, cur, ffn_output);
  7947. cb(cur, "l_out", il);
  7948. cur = ggml_add(ctx0, cur, inpL);
  7949. cb(cur, "l_out", il);
  7950. inpL = cur;
  7951. }
  7952. cur = llm_build_norm(ctx0, inpL, hparams,
  7953. model.output_norm,
  7954. model.output_norm_b,
  7955. LLM_NORM, cb, -1);
  7956. cb(cur, "result_norm", -1);
  7957. cur = ggml_mul_mat(ctx0, model.output, cur);
  7958. cb(cur, "result_output_no_bias", -1);
  7959. cur = ggml_add(ctx0, cur, model.output_b);
  7960. cb(cur, "result_output", -1);
  7961. ggml_build_forward_expand(gf, cur);
  7962. return gf;
  7963. }
  7964. struct ggml_cgraph * build_phi3() {
  7965. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  7966. const int64_t n_embd_head = hparams.n_embd_head_v;
  7967. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  7968. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  7969. struct ggml_tensor * cur;
  7970. struct ggml_tensor * inpL;
  7971. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  7972. // inp_pos - contains the positions
  7973. struct ggml_tensor * inp_pos = build_inp_pos();
  7974. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  7975. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  7976. for (int il = 0; il < n_layer; ++il) {
  7977. auto residual = inpL;
  7978. // self-attention
  7979. {
  7980. // rope freq factors for 128k context
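// build_rope_factors(il) selects this layer's RoPE frequency-factor tensor (long vs. short factors
// depending on the context size) and may be nullptr for models without them; it is passed to ggml_rope_ext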
  7981. struct ggml_tensor * rope_factors = build_rope_factors(il);
  7982. struct ggml_tensor* attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
  7983. model.layers[il].attn_norm,
  7984. NULL,
  7985. LLM_NORM_RMS, cb, il);
  7986. cb(attn_norm_output, "attn_norm", il);
  7987. struct ggml_tensor * Qcur = nullptr;
  7988. struct ggml_tensor * Kcur = nullptr;
  7989. struct ggml_tensor * Vcur = nullptr;
  7990. if (model.layers[il].wqkv) {
  7991. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
  7992. cb(cur, "wqkv", il);
  7993. Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd)));
  7994. Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd)));
  7995. Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa)));
  7996. }
  7997. else {
  7998. Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq);
  7999. Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk);
  8000. Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv);
  8001. }
  8002. cb(Qcur, "Qcur", il);
  8003. cb(Kcur, "Kcur", il);
  8004. cb(Vcur, "Vcur", il);
  8005. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8006. Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
  8007. Qcur = ggml_rope_ext(
  8008. ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig,
  8009. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  8010. );
  8011. cb(Qcur, "Qcur", il);
  8012. Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head)));
  8013. cb(Qcur, "Qcur", il);
  8014. Kcur = ggml_rope_ext(
  8015. ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig,
  8016. freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
  8017. );
  8018. cb(Kcur, "Kcur", il);
  8019. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8020. model.layers[il].wo, model.layers[il].bo,
  8021. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
  8022. }
  8023. if (il == n_layer - 1) {
  8024. // skip computing output for unused tokens
  8025. struct ggml_tensor* inp_out_ids = build_inp_out_ids();
  8026. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8027. residual = ggml_get_rows(ctx0, residual, inp_out_ids);
  8028. }
  8029. cur = ggml_add(ctx0, cur, residual);
  8030. residual = cur;
  8031. cur = llm_build_norm(ctx0, cur, hparams,
  8032. model.layers[il].ffn_norm, NULL,
  8033. LLM_NORM_RMS, cb, il);
  8034. cb(cur, "ffn_norm", il);
  8035. // FF
  8036. // special-case: the up and gate tensors are merged into a single tensor
8037. // TODO: support this special case in llm_build_ffn
  8038. {
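// NOTE: the first half of each row of "up" is passed through SiLU and used to
// gate the second half (up * SiLU(gate)) before the down projection.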
8039. struct ggml_tensor * up = ggml_mul_mat(ctx0, model.layers[il].ffn_up, cur);
8040. cb(up, "ffn_up", il);
8041. struct ggml_tensor * g = ggml_cont(ctx0, ggml_view_2d(ctx0, up, up->ne[0] / 2, up->ne[1], ggml_row_size(up->type, up->ne[0]), 0));
8042. struct ggml_tensor * y = ggml_cont(ctx0, ggml_view_2d(ctx0, up, up->ne[0] / 2, up->ne[1], ggml_row_size(up->type, up->ne[0]), up->nb[1] / 2));
8043. y = ggml_mul(ctx0, y, ggml_silu(ctx0, g));
8044. cb(y, "ffn_gate", il);
8045. struct ggml_tensor * down = ggml_mul_mat(ctx0, model.layers[il].ffn_down, y);
  8046. cb(down, "ffn_down", il);
  8047. cur = down;
  8048. cb(cur, "ffn_out", il);
  8049. }
  8050. cur = ggml_add(ctx0, residual, cur);
  8051. cb(cur, "l_out", il);
  8052. inpL = cur;
  8053. }
  8054. cur = llm_build_norm(ctx0, inpL, hparams,
  8055. model.output_norm,
  8056. NULL,
  8057. LLM_NORM_RMS, cb, -1);
  8058. cb(cur, "result_norm", -1);
  8059. cur = ggml_mul_mat(ctx0, model.output, cur);
  8060. cb(cur, "result_output", -1);
  8061. ggml_build_forward_expand(gf, cur);
  8062. return gf;
  8063. }
  8064. struct ggml_cgraph * build_plamo() {
  8065. struct ggml_cgraph * gf = ggml_new_graph(ctx0);
  8066. const int64_t n_embd_head = hparams.n_embd_head_v;
  8067. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8068. GGML_ASSERT(n_embd_head == hparams.n_rot);
  8069. struct ggml_tensor * cur;
  8070. struct ggml_tensor * inpL;
  8071. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8072. // inp_pos - contains the positions
  8073. struct ggml_tensor * inp_pos = build_inp_pos();
  8074. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8075. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8076. for (int il = 0; il < n_layer; ++il) {
  8077. // norm
  8078. cur = llm_build_norm(ctx0, inpL, hparams,
  8079. model.layers[il].attn_norm, NULL,
  8080. LLM_NORM_RMS, cb, il);
  8081. cb(cur, "attn_norm", il);
  8082. struct ggml_tensor * attention_norm = cur;
  8083. // self-attention
  8084. {
  8085. // compute Q and K and RoPE them
  8086. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8087. cb(Qcur, "Qcur", il);
  8088. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  8089. cb(Kcur, "Kcur", il);
  8090. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  8091. cb(Vcur, "Vcur", il);
  8092. Qcur = ggml_rope_ext(
  8093. ctx0, ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens), inp_pos, nullptr,
  8094. n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
  8095. ext_factor, attn_factor, beta_fast, beta_slow);
  8096. cb(Qcur, "Qcur", il);
  8097. Kcur = ggml_rope_ext(
  8098. ctx0, ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens), inp_pos, nullptr,
  8099. n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
  8100. ext_factor, attn_factor, beta_fast, beta_slow);
  8101. cb(Kcur, "Kcur", il);
  8102. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8103. model.layers[il].wo, NULL,
  8104. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  8105. }
  8106. struct ggml_tensor * sa_out = cur;
  8107. cur = attention_norm;
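// NOTE: PLaMo uses a parallel residual layout: the FFN below consumes the same
// pre-attention normed activations (attention_norm), and its output is summed
// with both the attention output (sa_out) and the layer input (inpL).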
  8108. if (il == n_layer - 1) {
  8109. // skip computing output for unused tokens
  8110. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8111. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8112. sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids);
  8113. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  8114. }
  8115. // feed-forward network
  8116. {
  8117. cur = llm_build_ffn(ctx0, cur,
  8118. model.layers[il].ffn_up, NULL,
  8119. model.layers[il].ffn_gate, NULL,
  8120. model.layers[il].ffn_down, NULL,
  8121. NULL,
  8122. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  8123. cb(cur, "ffn_out", il);
  8124. }
  8125. cur = ggml_add(ctx0, cur, sa_out);
  8126. cb(cur, "l_out", il);
  8127. cur = ggml_add(ctx0, cur, inpL);
  8128. cb(cur, "l_out", il);
  8129. // input for next layer
  8130. inpL = cur;
  8131. }
  8132. cur = inpL;
  8133. cur = llm_build_norm(ctx0, cur, hparams,
  8134. model.output_norm, NULL,
  8135. LLM_NORM_RMS, cb, -1);
  8136. cb(cur, "result_norm", -1);
  8137. // lm_head
  8138. cur = ggml_mul_mat(ctx0, model.output, cur);
  8139. cb(cur, "result_output", -1);
  8140. ggml_build_forward_expand(gf, cur);
  8141. return gf;
  8142. }
  8143. struct ggml_cgraph * build_gpt2() {
  8144. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8145. const int64_t n_embd_head = hparams.n_embd_head_v;
  8146. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  8147. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8148. struct ggml_tensor * cur;
  8149. struct ggml_tensor * pos;
  8150. struct ggml_tensor * inpL;
  8151. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8152. // inp_pos - contains the positions
  8153. struct ggml_tensor * inp_pos = build_inp_pos();
  8154. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8155. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8156. pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
  8157. cb(pos, "pos_embd", -1);
  8158. inpL = ggml_add(ctx0, inpL, pos);
  8159. cb(inpL, "inpL", -1);
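// NOTE: GPT-2 uses learned absolute position embeddings: rows of model.pos_embd
// are gathered by token position and added to the token embeddings before the first block.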
  8160. for (int il = 0; il < n_layer; ++il) {
  8161. cur = llm_build_norm(ctx0, inpL, hparams,
  8162. model.layers[il].attn_norm,
  8163. model.layers[il].attn_norm_b,
  8164. LLM_NORM, cb, il);
  8165. cb(cur, "attn_norm", il);
  8166. // self-attention
  8167. {
  8168. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  8169. cb(cur, "wqkv", il);
  8170. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  8171. cb(cur, "bqkv", il);
  8172. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  8173. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  8174. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  8175. cb(Qcur, "Qcur", il);
  8176. cb(Kcur, "Kcur", il);
  8177. cb(Vcur, "Vcur", il);
  8178. Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
  8179. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8180. model.layers[il].wo, model.layers[il].bo,
  8181. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  8182. }
  8183. if (il == n_layer - 1) {
  8184. // skip computing output for unused tokens
  8185. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8186. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8187. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  8188. }
  8189. // add the input
  8190. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  8191. cb(ffn_inp, "ffn_inp", il);
  8192. // FF
  8193. {
  8194. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  8195. model.layers[il].ffn_norm,
  8196. model.layers[il].ffn_norm_b,
  8197. LLM_NORM, cb, il);
  8198. cb(cur, "ffn_norm", il);
  8199. cur = llm_build_ffn(ctx0, cur,
  8200. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  8201. NULL, NULL,
  8202. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  8203. NULL,
  8204. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  8205. cb(cur, "ffn_out", il);
  8206. }
  8207. inpL = ggml_add(ctx0, cur, ffn_inp);
  8208. cb(inpL, "l_out", il);
  8209. }
  8210. cur = llm_build_norm(ctx0, inpL, hparams,
  8211. model.output_norm,
  8212. model.output_norm_b,
  8213. LLM_NORM, cb, -1);
  8214. cb(cur, "result_norm", -1);
  8215. cur = ggml_mul_mat(ctx0, model.output, cur);
  8216. cb(cur, "result_output", -1);
  8217. ggml_build_forward_expand(gf, cur);
  8218. return gf;
  8219. }
  8220. struct ggml_cgraph * build_codeshell() {
  8221. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8222. const int64_t n_embd_head = hparams.n_embd_head_v;
  8223. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  8224. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8225. GGML_ASSERT(n_embd_head == hparams.n_rot);
  8226. struct ggml_tensor * cur;
  8227. struct ggml_tensor * inpL;
  8228. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8229. // inp_pos - contains the positions
  8230. struct ggml_tensor * inp_pos = build_inp_pos();
  8231. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8232. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8233. for (int il = 0; il < n_layer; ++il) {
  8234. cur = llm_build_norm(ctx0, inpL, hparams,
  8235. model.layers[il].attn_norm,
  8236. model.layers[il].attn_norm_b,
  8237. LLM_NORM, cb, il);
  8238. cb(cur, "attn_norm", il);
  8239. // self-attention
  8240. {
  8241. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  8242. cb(cur, "wqkv", il);
  8243. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  8244. cb(cur, "bqkv", il);
  8245. struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  8246. struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  8247. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  8248. cb(tmpq, "tmpq", il);
  8249. cb(tmpk, "tmpk", il);
  8250. cb(Vcur, "Vcur", il);
  8251. struct ggml_tensor * Qcur = ggml_rope_ext(
  8252. ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  8253. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8254. ext_factor, attn_factor, beta_fast, beta_slow
  8255. );
  8256. cb(Qcur, "Qcur", il);
  8257. struct ggml_tensor * Kcur = ggml_rope_ext(
  8258. ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  8259. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8260. ext_factor, attn_factor, beta_fast, beta_slow
  8261. );
  8262. cb(Kcur, "Kcur", il);
  8263. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8264. model.layers[il].wo, model.layers[il].bo,
  8265. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  8266. }
  8267. if (il == n_layer - 1) {
  8268. // skip computing output for unused tokens
  8269. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8270. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8271. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  8272. }
  8273. // add the input
  8274. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  8275. cb(ffn_inp, "ffn_inp", il);
  8276. // FF
  8277. {
  8278. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  8279. model.layers[il].ffn_norm,
  8280. model.layers[il].ffn_norm_b,
  8281. LLM_NORM, cb, il);
  8282. cb(cur, "ffn_norm", il);
  8283. cur = llm_build_ffn(ctx0, cur,
  8284. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  8285. NULL, NULL,
  8286. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  8287. NULL,
  8288. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  8289. cb(cur, "ffn_out", il);
  8290. }
  8291. inpL = ggml_add(ctx0, cur, ffn_inp);
  8292. cb(inpL, "l_out", il);
  8293. }
  8294. cur = llm_build_norm(ctx0, inpL, hparams,
  8295. model.output_norm,
  8296. model.output_norm_b,
  8297. LLM_NORM, cb, -1);
  8298. cb(cur, "result_norm", -1);
  8299. cur = ggml_mul_mat(ctx0, model.output, cur);
  8300. cb(cur, "result_output", -1);
  8301. ggml_build_forward_expand(gf, cur);
  8302. return gf;
  8303. }
  8304. struct ggml_cgraph * build_orion() {
  8305. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8306. const int64_t n_embd_head = hparams.n_embd_head_v;
  8307. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8308. GGML_ASSERT(n_embd_head == hparams.n_rot);
  8309. struct ggml_tensor * cur;
  8310. struct ggml_tensor * inpL;
  8311. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8312. // inp_pos - contains the positions
  8313. struct ggml_tensor * inp_pos = build_inp_pos();
  8314. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8315. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8316. for (int il = 0; il < n_layer; ++il) {
  8317. struct ggml_tensor * inpSA = inpL;
  8318. // norm
  8319. cur = llm_build_norm(ctx0, inpL, hparams,
  8320. model.layers[il].attn_norm, model.layers[il].attn_norm_b,
  8321. LLM_NORM, cb, il);
  8322. cb(cur, "attn_norm", il);
  8323. // self-attention
  8324. {
  8325. // compute Q and K and RoPE them
  8326. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8327. cb(Qcur, "Qcur", il);
  8328. // if (model.layers[il].bq) {
  8329. // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8330. // cb(Qcur, "Qcur", il);
  8331. // }
  8332. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  8333. cb(Kcur, "Kcur", il);
  8334. // if (model.layers[il].bk) {
  8335. // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8336. // cb(Kcur, "Kcur", il);
  8337. // }
  8338. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  8339. cb(Vcur, "Vcur", il);
  8340. // if (model.layers[il].bv) {
  8341. // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8342. // cb(Vcur, "Vcur", il);
  8343. // }
  8344. Qcur = ggml_rope_ext(
  8345. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  8346. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8347. ext_factor, attn_factor, beta_fast, beta_slow
  8348. );
  8349. cb(Qcur, "Qcur", il);
  8350. Kcur = ggml_rope_ext(
  8351. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  8352. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8353. ext_factor, attn_factor, beta_fast, beta_slow
  8354. );
  8355. cb(Kcur, "Kcur", il);
  8356. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8357. model.layers[il].wo, NULL,
  8358. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  8359. }
  8360. if (il == n_layer - 1) {
  8361. // skip computing output for unused tokens
  8362. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8363. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8364. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8365. }
  8366. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8367. cb(ffn_inp, "ffn_inp", il);
  8368. // feed-forward network
  8369. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  8370. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  8371. LLM_NORM, cb, il);
  8372. cb(cur, "ffn_norm", il);
  8373. cur = llm_build_ffn(ctx0, cur,
  8374. model.layers[il].ffn_up, NULL,
  8375. model.layers[il].ffn_gate, NULL,
  8376. model.layers[il].ffn_down, NULL,
  8377. NULL,
  8378. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  8379. cb(cur, "ffn_out", il);
  8380. cur = ggml_add(ctx0, cur, ffn_inp);
  8381. cb(cur, "l_out", il);
  8382. // input for next layer
  8383. inpL = cur;
  8384. }
  8385. cur = inpL;
  8386. cur = llm_build_norm(ctx0, cur, hparams,
  8387. model.output_norm, model.output_norm_b,
  8388. LLM_NORM, cb, -1);
  8389. cb(cur, "result_norm", -1);
  8390. // lm_head
  8391. cur = ggml_mul_mat(ctx0, model.output, cur);
  8392. cb(cur, "result_output", -1);
  8393. ggml_build_forward_expand(gf, cur);
  8394. return gf;
  8395. }
  8396. struct ggml_cgraph * build_internlm2() {
  8397. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8398. const int64_t n_embd_head = hparams.n_embd_head_v;
  8399. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8400. GGML_ASSERT(n_embd_head == hparams.n_rot);
  8401. struct ggml_tensor * cur;
  8402. struct ggml_tensor * inpL;
  8403. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8404. // inp_pos - contains the positions
  8405. struct ggml_tensor * inp_pos = build_inp_pos();
  8406. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8407. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8408. for (int il = 0; il < n_layer; ++il) {
  8409. struct ggml_tensor * inpSA = inpL;
  8410. // norm
  8411. cur = llm_build_norm(ctx0, inpL, hparams,
  8412. model.layers[il].attn_norm, NULL,
  8413. LLM_NORM_RMS, cb, il);
  8414. cb(cur, "attn_norm", il);
  8415. // self-attention
  8416. {
  8417. // compute Q and K and RoPE them
  8418. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8419. cb(Qcur, "Qcur", il);
  8420. if (model.layers[il].bq) {
  8421. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8422. cb(Qcur, "Qcur", il);
  8423. }
  8424. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  8425. cb(Kcur, "Kcur", il);
  8426. if (model.layers[il].bk) {
  8427. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8428. cb(Kcur, "Kcur", il);
  8429. }
  8430. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  8431. cb(Vcur, "Vcur", il);
  8432. if (model.layers[il].bv) {
  8433. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8434. cb(Vcur, "Vcur", il);
  8435. }
  8436. Qcur = ggml_rope_ext(
  8437. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  8438. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8439. ext_factor, attn_factor, beta_fast, beta_slow
  8440. );
  8441. cb(Qcur, "Qcur", il);
  8442. Kcur = ggml_rope_ext(
  8443. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  8444. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8445. ext_factor, attn_factor, beta_fast, beta_slow
  8446. );
  8447. cb(Kcur, "Kcur", il);
  8448. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8449. model.layers[il].wo, model.layers[il].bo,
  8450. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  8451. }
  8452. if (il == n_layer - 1) {
  8453. // skip computing output for unused tokens
  8454. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8455. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8456. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8457. }
  8458. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8459. cb(ffn_inp, "ffn_inp", il);
  8460. // feed-forward network
  8461. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  8462. model.layers[il].ffn_norm, NULL,
  8463. LLM_NORM_RMS, cb, il);
  8464. cb(cur, "ffn_norm", il);
  8465. cur = llm_build_ffn(ctx0, cur,
  8466. model.layers[il].ffn_up, NULL,
  8467. model.layers[il].ffn_gate, NULL,
  8468. model.layers[il].ffn_down, NULL,
  8469. NULL,
  8470. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  8471. cb(cur, "ffn_out", il);
  8472. cur = ggml_add(ctx0, cur, ffn_inp);
  8473. cb(cur, "l_out", il);
  8474. // input for next layer
  8475. inpL = cur;
  8476. }
  8477. cur = inpL;
  8478. cur = llm_build_norm(ctx0, cur, hparams,
  8479. model.output_norm, NULL,
  8480. LLM_NORM_RMS, cb, -1);
  8481. cb(cur, "result_norm", -1);
  8482. // lm_head
  8483. cur = ggml_mul_mat(ctx0, model.output, cur);
  8484. cb(cur, "result_output", -1);
  8485. ggml_build_forward_expand(gf, cur);
  8486. return gf;
  8487. }
  8488. // ref: https://arxiv.org/abs/2203.03466
  8489. // https://github.com/ggerganov/llama.cpp/issues/5276#issuecomment-1925774738
  8490. // based on the original build_llama() function
  8491. struct ggml_cgraph * build_minicpm() {
  8492. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8493. const int64_t n_embd_head = hparams.n_embd_head_v;
  8494. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8495. GGML_ASSERT(n_embd_head == hparams.n_rot);
  8496. const int64_t n_embd = hparams.n_embd;
8497. // TODO: if the model varies, these parameters need to be read from the model
  8498. const int64_t n_embd_base = 256;
  8499. const float scale_embd = 12.0f;
  8500. const float scale_depth = 1.4f;
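// NOTE: these constants appear to implement the muP-style scaling from the paper
// referenced above build_minicpm(): embeddings are scaled by scale_embd, each
// residual branch by scale_depth/sqrt(n_layer), and the lm_head input by
// n_embd_base/n_embd. The hard-coded values are assumed to match the released
// MiniCPM checkpoints (see the TODO above).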
  8501. struct ggml_tensor * cur;
  8502. struct ggml_tensor * inpL;
  8503. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8504. // scale the input embeddings
  8505. inpL = ggml_scale(ctx0, inpL, scale_embd);
  8506. cb(inpL, "inp_scaled", -1);
  8507. // inp_pos - contains the positions
  8508. struct ggml_tensor * inp_pos = build_inp_pos();
  8509. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8510. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8511. for (int il = 0; il < n_layer; ++il) {
  8512. struct ggml_tensor * inpSA = inpL;
  8513. // norm
  8514. cur = llm_build_norm(ctx0, inpL, hparams,
  8515. model.layers[il].attn_norm, NULL,
  8516. LLM_NORM_RMS, cb, il);
  8517. cb(cur, "attn_norm", il);
  8518. // self-attention
  8519. {
  8520. // compute Q and K and RoPE them
  8521. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8522. cb(Qcur, "Qcur", il);
  8523. if (model.layers[il].bq) {
  8524. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8525. cb(Qcur, "Qcur", il);
  8526. }
  8527. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  8528. cb(Kcur, "Kcur", il);
  8529. if (model.layers[il].bk) {
  8530. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8531. cb(Kcur, "Kcur", il);
  8532. }
  8533. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  8534. cb(Vcur, "Vcur", il);
  8535. if (model.layers[il].bv) {
  8536. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8537. cb(Vcur, "Vcur", il);
  8538. }
  8539. Qcur = ggml_rope_ext(
  8540. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  8541. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8542. ext_factor, attn_factor, beta_fast, beta_slow
  8543. );
  8544. cb(Qcur, "Qcur", il);
  8545. Kcur = ggml_rope_ext(
  8546. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  8547. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8548. ext_factor, attn_factor, beta_fast, beta_slow
  8549. );
  8550. cb(Kcur, "Kcur", il);
  8551. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8552. model.layers[il].wo, model.layers[il].bo,
  8553. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  8554. }
  8555. if (il == n_layer - 1) {
  8556. // skip computing output for unused tokens
  8557. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8558. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8559. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8560. }
  8561. // scale_res - scale the hidden states for residual connection
  8562. const float scale_res = scale_depth/sqrtf(float(n_layer));
  8563. cur = ggml_scale(ctx0, cur, scale_res);
  8564. cb(cur, "hidden_scaled", -1);
  8565. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8566. cb(ffn_inp, "ffn_inp", il);
  8567. // feed-forward network
  8568. {
  8569. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  8570. model.layers[il].ffn_norm, NULL,
  8571. LLM_NORM_RMS, cb, il);
  8572. cb(cur, "ffn_norm", il);
  8573. cur = llm_build_ffn(ctx0, cur,
  8574. model.layers[il].ffn_up, NULL,
  8575. model.layers[il].ffn_gate, NULL,
  8576. model.layers[il].ffn_down, NULL,
  8577. NULL,
  8578. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  8579. cb(cur, "ffn_out", il);
  8580. }
  8581. // scale the hidden states for residual connection
  8582. cur = ggml_scale(ctx0, cur, scale_res);
  8583. cb(cur, "hidden_scaled_ffn", -1);
  8584. cur = ggml_add(ctx0, cur, ffn_inp);
  8585. cb(cur, "l_out", il);
  8586. // input for next layer
  8587. inpL = cur;
  8588. }
  8589. cur = inpL;
  8590. cur = llm_build_norm(ctx0, cur, hparams,
  8591. model.output_norm, NULL,
  8592. LLM_NORM_RMS, cb, -1);
  8593. cb(cur, "result_norm", -1);
  8594. // lm_head scaling
  8595. const float scale_lmhead = float(n_embd_base)/float(n_embd);
  8596. cur = ggml_scale(ctx0, cur, scale_lmhead);
  8597. cb(cur, "lmhead_scaling", -1);
  8598. // lm_head
  8599. cur = ggml_mul_mat(ctx0, model.output, cur);
  8600. cb(cur, "result_output", -1);
  8601. ggml_build_forward_expand(gf, cur);
  8602. return gf;
  8603. }
  8604. struct ggml_cgraph * build_gemma() {
  8605. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8606. const int64_t n_embd_head_k = hparams.n_embd_head_k;
  8607. struct ggml_tensor * cur;
  8608. struct ggml_tensor * inpL;
  8609. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8610. inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
  8611. cb(inpL, "inp_scaled", -1);
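// NOTE: Gemma scales the token embeddings by sqrt(n_embd); queries are further
// scaled by 1/sqrt(n_embd_head_k) before attention, so llm_build_kv is called
// with a kq_scale of 1.0f below.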
  8612. // inp_pos - contains the positions
  8613. struct ggml_tensor * inp_pos = build_inp_pos();
  8614. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8615. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8616. for (int il = 0; il < n_layer; ++il) {
  8617. // norm
  8618. cur = llm_build_norm(ctx0, inpL, hparams,
  8619. model.layers[il].attn_norm, NULL,
  8620. LLM_NORM_RMS, cb, il);
  8621. cb(cur, "attn_norm", il);
  8622. // self-attention
  8623. {
  8624. // compute Q and K and RoPE them
  8625. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8626. cb(Qcur, "Qcur", il);
  8627. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  8628. cb(Kcur, "Kcur", il);
  8629. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  8630. cb(Vcur, "Vcur", il);
  8631. Qcur = ggml_rope_ext(
  8632. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr,
  8633. n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale,
  8634. ext_factor, attn_factor, beta_fast, beta_slow);
  8635. cb(Qcur, "Qcur", il);
  8636. Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));
  8637. cb(Qcur, "Qcur_scaled", il);
  8638. Kcur = ggml_rope_ext(
  8639. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr,
  8640. n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale,
  8641. ext_factor, attn_factor, beta_fast, beta_slow);
  8642. cb(Kcur, "Kcur", il);
  8643. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8644. model.layers[il].wo, NULL,
  8645. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
  8646. }
  8647. if (il == n_layer - 1) {
  8648. // skip computing output for unused tokens
  8649. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8650. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8651. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  8652. }
  8653. struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
  8654. cb(sa_out, "sa_out", il);
  8655. cur = llm_build_norm(ctx0, sa_out, hparams,
  8656. model.layers[il].ffn_norm, NULL,
  8657. LLM_NORM_RMS, cb, il);
  8658. cb(cur, "ffn_norm", il);
  8659. // feed-forward network
  8660. {
  8661. cur = llm_build_ffn(ctx0, cur,
  8662. model.layers[il].ffn_up, NULL,
  8663. model.layers[il].ffn_gate, NULL,
  8664. model.layers[il].ffn_down, NULL,
  8665. NULL,
  8666. LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
  8667. cb(cur, "ffn_out", il);
  8668. }
  8669. cur = ggml_add(ctx0, cur, sa_out);
  8670. cb(cur, "l_out", il);
  8671. // input for next layer
  8672. inpL = cur;
  8673. }
  8674. cur = inpL;
  8675. cur = llm_build_norm(ctx0, cur, hparams,
  8676. model.output_norm, NULL,
  8677. LLM_NORM_RMS, cb, -1);
  8678. cb(cur, "result_norm", -1);
  8679. // lm_head
  8680. cur = ggml_mul_mat(ctx0, model.output, cur);
  8681. cb(cur, "result_output", -1);
  8682. ggml_build_forward_expand(gf, cur);
  8683. return gf;
  8684. }
  8685. struct ggml_cgraph * build_starcoder2() {
  8686. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8687. const int64_t n_embd_head = hparams.n_embd_head_v;
  8688. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8689. GGML_ASSERT(n_embd_head == hparams.n_rot);
  8690. struct ggml_tensor * cur;
  8691. struct ggml_tensor * inpL;
  8692. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8693. // inp_pos - contains the positions
  8694. struct ggml_tensor * inp_pos = build_inp_pos();
  8695. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8696. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8697. for (int il = 0; il < n_layer; ++il) {
  8698. struct ggml_tensor * inpSA = inpL;
  8699. // norm
  8700. cur = llm_build_norm(ctx0, inpL, hparams,
  8701. model.layers[il].attn_norm, model.layers[il].attn_norm_b,
  8702. LLM_NORM, cb, il);
  8703. cb(cur, "attn_norm", il);
  8704. // self-attention
  8705. {
  8706. // compute Q and K and RoPE them
  8707. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8708. cb(Qcur, "Qcur", il);
  8709. if (model.layers[il].bq) {
  8710. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8711. cb(Qcur, "Qcur", il);
  8712. }
  8713. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  8714. cb(Kcur, "Kcur", il);
  8715. if (model.layers[il].bk) {
  8716. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8717. cb(Kcur, "Kcur", il);
  8718. }
  8719. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  8720. cb(Vcur, "Vcur", il);
  8721. if (model.layers[il].bv) {
  8722. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8723. cb(Vcur, "Vcur", il);
  8724. }
  8725. Qcur = ggml_rope_ext(
  8726. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  8727. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8728. ext_factor, attn_factor, beta_fast, beta_slow
  8729. );
  8730. cb(Qcur, "Qcur", il);
  8731. Kcur = ggml_rope_ext(
  8732. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  8733. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8734. ext_factor, attn_factor, beta_fast, beta_slow
  8735. );
  8736. cb(Kcur, "Kcur", il);
  8737. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8738. model.layers[il].wo, model.layers[il].bo,
  8739. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  8740. }
  8741. if (il == n_layer - 1) {
  8742. // skip computing output for unused tokens
  8743. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8744. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8745. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  8746. }
  8747. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  8748. cb(ffn_inp, "ffn_inp", il);
  8749. // feed-forward network
  8750. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  8751. model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
  8752. LLM_NORM, cb, il);
  8753. cb(cur, "ffn_norm", il);
  8754. cur = llm_build_ffn(ctx0, cur,
  8755. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  8756. NULL, NULL,
  8757. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  8758. NULL,
  8759. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  8760. cb(cur, "ffn_out", il);
  8761. cur = ggml_add(ctx0, cur, ffn_inp);
  8762. cb(cur, "l_out", il);
  8763. // input for next layer
  8764. inpL = cur;
  8765. }
  8766. cur = inpL;
  8767. cur = llm_build_norm(ctx0, cur, hparams,
  8768. model.output_norm, model.output_norm_b,
  8769. LLM_NORM, cb, -1);
  8770. cb(cur, "result_norm", -1);
  8771. // lm_head
  8772. cur = ggml_mul_mat(ctx0, model.output, cur);
  8773. cb(cur, "result_output", -1);
  8774. ggml_build_forward_expand(gf, cur);
  8775. return gf;
  8776. }
  8777. struct ggml_cgraph * build_mamba() {
  8778. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8779. const int64_t d_model = n_embd;
  8780. const int64_t d_conv = hparams.ssm_d_conv;
  8781. const int64_t d_inner = hparams.ssm_d_inner;
  8782. GGML_ASSERT(2 * d_model == d_inner);
  8783. const int64_t d_state = hparams.ssm_d_state;
  8784. const int64_t dt_rank = hparams.ssm_dt_rank;
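// NOTE: per-layer Mamba dimensions: d_conv is the depthwise convolution window,
// d_inner (asserted to be 2*d_model) the expanded channel count, d_state the SSM
// state size per channel, and dt_rank the rank of the low-rank dt projection.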
  8785. struct ggml_tensor * cur;
  8786. struct ggml_tensor * inpL;
  8787. // {n_embd, n_tokens}
  8788. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8789. struct ggml_tensor * state_mask = build_inp_s_mask();
  8790. struct ggml_tensor * state_seq = build_inp_s_seq();
  8791. for (int il = 0; il < n_layer; ++il) {
  8792. // (ab)using the KV cache to store the states
  8793. struct ggml_tensor * conv_states = ggml_reshape_2d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s(), kv_self.size);
  8794. struct ggml_tensor * ssm_states = ggml_reshape_2d(ctx0, kv_self.v_l[il], hparams.n_embd_v_s(), kv_self.size);
  8795. // clear states of sequences which are starting at the beginning of this batch
  8796. {
  8797. conv_states = ggml_mul(ctx0,
  8798. ggml_view_2d(ctx0, conv_states, conv_states->ne[0], n_kv, conv_states->nb[1], kv_head*conv_states->nb[1]),
  8799. state_mask);
  8800. ssm_states = ggml_mul(ctx0,
  8801. ggml_view_2d(ctx0, ssm_states, ssm_states->ne[0], n_kv, ssm_states->nb[1], kv_head*ssm_states->nb[1]),
  8802. state_mask);
  8803. }
  8804. conv_states = ggml_reshape_3d(ctx0, conv_states, d_conv - 1, d_inner, n_kv);
  8805. ssm_states = ggml_reshape_3d(ctx0, ssm_states, d_state, d_inner, n_kv);
  8806. // norm
  8807. cur = llm_build_norm(ctx0, inpL, hparams,
  8808. model.layers[il].attn_norm, NULL,
  8809. LLM_NORM_RMS, cb, il);
  8810. cb(cur, "attn_norm", il);
  8811. // {n_embd, 2*d_inner} * {n_embd, n_tokens} => {2*d_inner, n_tokens}
  8812. struct ggml_tensor * xz = ggml_mul_mat(ctx0, model.layers[il].ssm_in, cur);
  8813. // split the above in two
  8814. // => {d_inner, n_tokens}
  8815. struct ggml_tensor * x = ggml_view_2d(ctx0, xz, d_inner, xz->ne[1], xz->nb[1], 0);
  8816. struct ggml_tensor * z = ggml_view_2d(ctx0, xz, d_inner, xz->ne[1], xz->nb[1], ggml_element_size(xz)*d_inner);
  8817. // conv
  8818. {
  8819. // Custom operator which is needed only to ease simultaneous sequence processing.
  8820. // For a single sequence, the equivalent is to concatenate the columns of conv_states and x,
  8821. // then make a self-overlapping view of that over d_conv columns at each stride in the 3rd dimension,
8822. // then element-wise multiply that with the conv1d weight,
  8823. // then sum the elements of each row,
  8824. // (the last two steps are a dot product over rows (also doable with mul_mat))
  8825. // then permute away the ne[0] dimension,
  8826. // and then you're left with the resulting x tensor.
  8827. // The new conv_states is the last (d_conv - 1) columns
  8828. // of the last 3rd dimensional "layer" of the self-overlapping view.
  8829. // For simultaneous sequences, it's more complicated.
  8830. struct ggml_tensor * x_conv = ggml_ssm_conv(ctx0, conv_states, x, model.layers[il].ssm_conv1d, state_seq);
  8831. // store last (d_conv - 1) columns of the conv_state part of x_conv back into the KV cache
  8832. ggml_build_forward_expand(gf,
  8833. ggml_cpy(ctx0,
  8834. ggml_view_2d(ctx0, x_conv, d_conv - 1, d_inner*n_kv, d_conv*ggml_element_size(x_conv), (1+d_inner*n_tokens)*ggml_element_size(x_conv)),
  8835. ggml_view_1d(ctx0, kv_self.k_l[il], (d_conv - 1)*(d_inner)*(n_kv), kv_head*(d_conv - 1)*(d_inner)*ggml_element_size(x_conv))));
  8836. // extract x from x_conv
  8837. x = ggml_view_2d(ctx0, x_conv, d_inner, n_tokens, d_inner*ggml_element_size(x_conv), 0);
  8838. // bias
  8839. x = ggml_add(ctx0, x, model.layers[il].ssm_conv1d_b);
  8840. x = ggml_silu(ctx0, x);
  8841. }
  8842. // ssm
  8843. {
  8844. // {d_inner, dt_rank + 2*d_state} * {d_inner, n_tokens} => {dt_rank + 2*d_state, n_tokens}
  8845. struct ggml_tensor * x_db = ggml_mul_mat(ctx0, model.layers[il].ssm_x, x);
  8846. // split
  8847. struct ggml_tensor * dt = ggml_view_2d(ctx0, x_db, dt_rank, n_tokens, x_db->nb[1], 0);
  8848. struct ggml_tensor * B = ggml_view_2d(ctx0, x_db, d_state, n_tokens, x_db->nb[1], ggml_element_size(x_db)*dt_rank);
  8849. struct ggml_tensor * C = ggml_view_2d(ctx0, x_db, d_state, n_tokens, x_db->nb[1], ggml_element_size(x_db)*(dt_rank+d_state));
  8850. // {dt_rank, d_inner} * {dt_rank, n_tokens} => {d_inner, n_tokens}
  8851. dt = ggml_mul_mat(ctx0, model.layers[il].ssm_dt, dt);
  8852. dt = ggml_add(ctx0, dt, model.layers[il].ssm_dt_b);
  8853. // Custom operator to optimize the parallel associative scan
8854. // as described in Annex D of the Mamba paper.
  8855. // => {d_inner, n_tokens} and {d_state, d_inner, n_kv} combined,
  8856. // because only a single tensor can be returned.
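// Rough per-channel sketch of the recurrence evaluated by the scan
// (selective-SSM discretization, with softplus applied to dt inside the op):
//   h_t = exp(dt_t * A) * h_{t-1} + dt_t * B_t * x_t
//   y_t = C_t . h_t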
  8857. struct ggml_tensor * y_ssm_states = ggml_ssm_scan(ctx0, ssm_states, x, dt, model.layers[il].ssm_a, B, C, state_seq);
  8858. // store last states (the second part of y_ssm_states)
  8859. ggml_build_forward_expand(gf,
  8860. ggml_cpy(ctx0,
  8861. ggml_view_1d(ctx0, y_ssm_states, d_state*d_inner*n_kv, d_inner*n_tokens*ggml_element_size(y_ssm_states)),
  8862. ggml_view_1d(ctx0, kv_self.v_l[il], d_state*d_inner*n_kv, kv_head*d_state*d_inner*ggml_element_size(ssm_states))));
  8863. struct ggml_tensor * y = ggml_view_2d(ctx0, y_ssm_states, d_inner, n_tokens, d_inner*ggml_element_size(y_ssm_states), 0);
  8864. if (il == n_layer - 1) {
  8865. // skip computing output for unused tokens
  8866. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8867. x = ggml_get_rows(ctx0, x, inp_out_ids);
  8868. y = ggml_get_rows(ctx0, y, inp_out_ids);
  8869. z = ggml_get_rows(ctx0, z, inp_out_ids);
  8870. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  8871. }
  8872. // {d_inner, n_tokens} * {d_inner} => {d_inner, n_tokens}
  8873. y = ggml_add(ctx0, y, ggml_mul(ctx0, x, model.layers[il].ssm_d));
  8874. y = ggml_mul(ctx0, y, ggml_silu(ctx0, z));
  8875. // {d_inner, n_embd} * {d_inner, n_tokens} => {n_embd, n_tokens}
  8876. cur = ggml_mul_mat(ctx0, model.layers[il].ssm_out, y);
  8877. }
  8878. // residual
  8879. cur = ggml_add(ctx0, cur, inpL);
  8880. cb(cur, "l_out", il);
  8881. // input for next layer
  8882. inpL = cur;
  8883. }
  8884. // final rmsnorm
  8885. cur = llm_build_norm(ctx0, inpL, hparams,
  8886. model.output_norm, NULL,
  8887. LLM_NORM_RMS, cb, -1);
  8888. cb(cur, "result_norm", -1);
  8889. // lm_head
  8890. cur = ggml_mul_mat(ctx0, model.output, cur);
  8891. cb(cur, "result_output", -1);
  8892. ggml_build_forward_expand(gf, cur);
  8893. return gf;
  8894. }
  8895. struct ggml_cgraph * build_command_r() {
  8896. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  8897. const int64_t n_embd_head = hparams.n_embd_head_v;
  8898. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  8899. const float f_logit_scale = hparams.f_logit_scale;
  8900. struct ggml_tensor * cur;
  8901. struct ggml_tensor * inpL;
  8902. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  8903. // inp_pos - contains the positions
  8904. struct ggml_tensor * inp_pos = build_inp_pos();
  8905. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  8906. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  8907. for (int il = 0; il < n_layer; ++il) {
  8908. // norm
  8909. cur = llm_build_norm(ctx0, inpL, hparams,
  8910. model.layers[il].attn_norm, NULL,
  8911. LLM_NORM, cb, il);
  8912. cb(cur, "attn_norm", il);
  8913. struct ggml_tensor * ffn_inp = cur;
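// NOTE: Command-R uses a parallel block: attention and the FFN both read the same
// normed input (ffn_inp), and their outputs are added to the layer input (inpL)
// together at the end of the loop body.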
  8914. // self-attention
  8915. {
  8916. // compute Q and K and RoPE them
  8917. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  8918. cb(Qcur, "Qcur", il);
  8919. if (model.layers[il].bq) {
  8920. Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
  8921. cb(Qcur, "Qcur", il);
  8922. }
  8923. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  8924. cb(Kcur, "Kcur", il);
  8925. if (model.layers[il].bk) {
  8926. Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
  8927. cb(Kcur, "Kcur", il);
  8928. }
  8929. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  8930. cb(Vcur, "Vcur", il);
  8931. if (model.layers[il].bv) {
  8932. Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
  8933. cb(Vcur, "Vcur", il);
  8934. }
  8935. if (model.layers[il].attn_q_norm) {
  8936. Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens,
  8937. ggml_element_size(Qcur) * n_embd_head,
  8938. ggml_element_size(Qcur) * n_embd_head * n_head,
  8939. 0);
  8940. cb(Qcur, "Qcur", il);
  8941. Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens,
  8942. ggml_element_size(Kcur) * n_embd_head,
  8943. ggml_element_size(Kcur) * n_embd_head * n_head_kv,
  8944. 0);
  8945. cb(Kcur, "Kcur", il);
  8946. Qcur = llm_build_norm(ctx0, Qcur, hparams,
  8947. model.layers[il].attn_q_norm,
  8948. NULL,
  8949. LLM_NORM, cb, il);
  8950. cb(Qcur, "Qcur", il);
  8951. Kcur = llm_build_norm(ctx0, Kcur, hparams,
  8952. model.layers[il].attn_k_norm,
  8953. NULL,
  8954. LLM_NORM, cb, il);
  8955. cb(Kcur, "Kcur", il);
  8956. }
  8957. Qcur = ggml_rope_ext(
  8958. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  8959. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8960. ext_factor, attn_factor, beta_fast, beta_slow
  8961. );
  8962. cb(Qcur, "Qcur", il);
  8963. Kcur = ggml_rope_ext(
  8964. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  8965. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  8966. ext_factor, attn_factor, beta_fast, beta_slow
  8967. );
  8968. cb(Kcur, "Kcur", il);
  8969. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  8970. model.layers[il].wo, model.layers[il].bo,
  8971. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  8972. }
  8973. if (il == n_layer - 1) {
  8974. // skip computing output for unused tokens
  8975. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  8976. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  8977. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  8978. ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
  8979. }
  8980. struct ggml_tensor * attn_out = cur;
  8981. // feed-forward network
  8982. {
  8983. cur = llm_build_ffn(ctx0, ffn_inp,
  8984. model.layers[il].ffn_up, NULL,
  8985. model.layers[il].ffn_gate, NULL,
  8986. model.layers[il].ffn_down, NULL,
  8987. NULL,
  8988. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  8989. cb(cur, "ffn_out", il);
  8990. }
  8991. // add together residual + FFN + self-attention
  8992. cur = ggml_add(ctx0, cur, inpL);
  8993. cur = ggml_add(ctx0, cur, attn_out);
  8994. cb(cur, "l_out", il);
  8995. // input for next layer
  8996. inpL = cur;
  8997. }
  8998. cur = inpL;
  8999. cur = llm_build_norm(ctx0, cur, hparams,
  9000. model.output_norm, NULL,
  9001. LLM_NORM, cb, -1);
  9002. cb(cur, "result_norm", -1);
  9003. // lm_head
  9004. cur = ggml_mul_mat(ctx0, model.output, cur);
  9005. if (f_logit_scale) {
  9006. cur = ggml_scale(ctx0, cur, f_logit_scale);
  9007. }
  9008. cb(cur, "result_output", -1);
  9009. ggml_build_forward_expand(gf, cur);
  9010. return gf;
  9011. }
  9012. // ref: https://allenai.org/olmo
  9013. // based on the original build_llama() function, changes:
  9014. // * non-parametric layer norm
  9015. // * clamp qkv
  9016. // * removed bias
  9017. // * removed MoE
  9018. struct ggml_cgraph * build_olmo() {
  9019. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  9020. // mutable variable, needed during the last layer of the computation to skip unused tokens
  9021. int32_t n_tokens = this->n_tokens;
  9022. const int64_t n_embd_head = hparams.n_embd_head_v;
  9023. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  9024. GGML_ASSERT(n_embd_head == hparams.n_rot);
  9025. struct ggml_tensor * cur;
  9026. struct ggml_tensor * inpL;
  9027. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  9028. // inp_pos - contains the positions
  9029. struct ggml_tensor * inp_pos = build_inp_pos();
  9030. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  9031. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  9032. for (int il = 0; il < n_layer; ++il) {
  9033. struct ggml_tensor * inpSA = inpL;
  9034. // norm
  9035. cur = llm_build_norm(ctx0, inpL, hparams,
  9036. NULL, NULL,
  9037. LLM_NORM, cb, il);
  9038. cb(cur, "attn_norm", il);
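// NOTE: passing NULL/NULL to llm_build_norm gives the non-parametric layer norm
// mentioned in the comment above build_olmo() (normalization without learned scale or bias).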
  9039. // self-attention
  9040. {
  9041. // compute Q and K and RoPE them
  9042. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  9043. cb(Qcur, "Qcur", il);
  9044. if (hparams.f_clamp_kqv > 0.0f) {
  9045. Qcur = ggml_clamp(ctx0, Qcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  9046. cb(Qcur, "Qcur", il);
  9047. }
  9048. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  9049. cb(Kcur, "Kcur", il);
  9050. if (hparams.f_clamp_kqv > 0.0f) {
  9051. Kcur = ggml_clamp(ctx0, Kcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  9052. cb(Kcur, "Kcur", il);
  9053. }
  9054. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  9055. cb(Vcur, "Vcur", il);
  9056. if (hparams.f_clamp_kqv > 0.0f) {
  9057. Vcur = ggml_clamp(ctx0, Vcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
  9058. cb(Vcur, "Vcur", il);
  9059. }
  9060. Qcur = ggml_rope_ext(
  9061. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  9062. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9063. ext_factor, attn_factor, beta_fast, beta_slow
  9064. );
  9065. cb(Qcur, "Qcur", il);
  9066. Kcur = ggml_rope_ext(
  9067. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  9068. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9069. ext_factor, attn_factor, beta_fast, beta_slow
  9070. );
  9071. cb(Kcur, "Kcur", il);
  9072. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  9073. model.layers[il].wo, nullptr,
  9074. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  9075. }
  9076. if (il == n_layer - 1) {
  9077. // skip computing output for unused tokens
  9078. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9079. n_tokens = n_outputs;
  9080. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  9081. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  9082. }
  9083. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  9084. cb(ffn_inp, "ffn_inp", il);
  9085. // feed-forward network
  9086. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  9087. NULL, NULL,
  9088. LLM_NORM, cb, il);
  9089. cb(cur, "ffn_norm", il);
  9090. cur = llm_build_ffn(ctx0, cur,
  9091. model.layers[il].ffn_up, NULL,
  9092. model.layers[il].ffn_gate, NULL,
  9093. model.layers[il].ffn_down, NULL,
  9094. NULL,
  9095. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  9096. cb(cur, "ffn_out", il);
  9097. cur = ggml_add(ctx0, cur, ffn_inp);
  9098. cb(cur, "ffn_out", il);
  9099. ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
  9100. if (layer_dir != nullptr) {
  9101. cur = ggml_add(ctx0, cur, layer_dir);
  9102. }
  9103. cb(cur, "l_out", il);
  9104. // input for next layer
  9105. inpL = cur;
  9106. }
  9107. cur = inpL;
  9108. cur = llm_build_norm(ctx0, cur, hparams,
  9109. NULL, NULL,
  9110. LLM_NORM, cb, -1);
  9111. cb(cur, "result_norm", -1);
  9112. // lm_head
  9113. cur = ggml_mul_mat(ctx0, model.output, cur);
  9114. cb(cur, "result_output", -1);
  9115. ggml_build_forward_expand(gf, cur);
  9116. return gf;
  9117. }
  9118. struct ggml_cgraph * build_gptneox() {
  9119. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  9120. const int64_t n_embd_head = hparams.n_embd_head_v;
  9121. const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
  9122. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  9123. struct ggml_tensor * cur;
  9124. struct ggml_tensor * inpL;
  9125. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  9126. // inp_pos - contains the positions
  9127. struct ggml_tensor * inp_pos = build_inp_pos();
  9128. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  9129. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  9130. for (int il = 0; il < n_layer; ++il) {
  9131. cur = llm_build_norm(ctx0, inpL, hparams,
  9132. model.layers[il].attn_norm,
  9133. model.layers[il].attn_norm_b,
  9134. LLM_NORM, cb, il);
  9135. cb(cur, "attn_norm", il);
  9136. // self-attention
  9137. {
  9138. cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
  9139. cb(cur, "wqkv", il);
  9140. cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
  9141. cb(cur, "bqkv", il);
  9142. struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
  9143. struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
  9144. struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
  9145. cb(Qcur, "Qcur", il);
  9146. cb(Kcur, "Kcur", il);
  9147. cb(Vcur, "Vcur", il);
  9148. Qcur = ggml_rope_ext(
  9149. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  9150. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9151. ext_factor, attn_factor, beta_fast, beta_slow
  9152. );
  9153. cb(Qcur, "Qcur", il);
  9154. Kcur = ggml_rope_ext(
  9155. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  9156. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9157. ext_factor, attn_factor, beta_fast, beta_slow
  9158. );
  9159. cb(Kcur, "Kcur", il);
  9160. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  9161. model.layers[il].wo, model.layers[il].bo,
  9162. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  9163. }
  9164. if (il == n_layer - 1) {
  9165. // skip computing output for unused tokens
  9166. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9167. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  9168. inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
  9169. }
  9170. // ffn
  9171. if (hparams.use_par_res) {
  9172. // attention and ffn are computed in parallel
  9173. // x = x + attn(ln1(x)) + ffn(ln2(x))
  9174. struct ggml_tensor * attn_out = cur;
  9175. cur = llm_build_norm(ctx0, inpL, hparams,
  9176. model.layers[il].ffn_norm,
  9177. model.layers[il].ffn_norm_b,
  9178. LLM_NORM, cb, il);
  9179. cb(cur, "ffn_norm", il);
  9180. cur = llm_build_ffn(ctx0, cur,
  9181. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  9182. NULL, NULL,
  9183. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  9184. NULL,
  9185. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  9186. cb(cur, "ffn_out", il);
  9187. cur = ggml_add(ctx0, cur, inpL);
  9188. cb(cur, "ffn_out", il);
  9189. inpL = ggml_add(ctx0, cur, attn_out);
  9190. cb(inpL, "l_out", il);
  9191. } else {
  9192. // attention and ffn are computed sequentially
  9193. // x = x + attn(ln1(x))
  9194. // x = x + ffn(ln2(x))
  9195. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
  9196. cb(ffn_inp, "ffn_inp", il);
  9197. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  9198. model.layers[il].ffn_norm,
  9199. model.layers[il].ffn_norm_b,
  9200. LLM_NORM, cb, il);
  9201. cb(cur, "ffn_norm", il);
  9202. cur = llm_build_ffn(ctx0, cur,
  9203. model.layers[il].ffn_up, model.layers[il].ffn_up_b,
  9204. NULL, NULL,
  9205. model.layers[il].ffn_down, model.layers[il].ffn_down_b,
  9206. NULL,
  9207. LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
  9208. cb(cur, "ffn_out", il);
  9209. inpL = ggml_add(ctx0, cur, ffn_inp);
  9210. cb(inpL, "l_out", il);
  9211. }
  9212. }
  9213. cur = llm_build_norm(ctx0, inpL, hparams,
  9214. model.output_norm,
  9215. model.output_norm_b,
  9216. LLM_NORM, cb, -1);
  9217. cb(cur, "result_norm", -1);
  9218. cur = ggml_mul_mat(ctx0, model.output, cur);
  9219. cb(cur, "result_output", -1);
  9220. ggml_build_forward_expand(gf, cur);
  9221. return gf;
  9222. }
  9223. struct ggml_cgraph * build_arctic() {
  9224. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  9225. // mutable variable, needed during the last layer of the computation to skip unused tokens
  9226. int32_t n_tokens = this->n_tokens;
  9227. const int64_t n_embd_head = hparams.n_embd_head_v;
  9228. GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
  9229. GGML_ASSERT(n_embd_head == hparams.n_rot);
  9230. struct ggml_tensor * cur;
  9231. struct ggml_tensor * inpL;
  9232. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  9233. // inp_pos - contains the positions
  9234. struct ggml_tensor * inp_pos = build_inp_pos();
  9235. // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
  9236. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  9237. for (int il = 0; il < n_layer; ++il) {
  9238. struct ggml_tensor * inpSA = inpL;
  9239. // norm
  9240. cur = llm_build_norm(ctx0, inpL, hparams,
  9241. model.layers[il].attn_norm, NULL,
  9242. LLM_NORM_RMS, cb, il);
  9243. cb(cur, "attn_norm", il);
  9244. // self-attention
  9245. {
  9246. // compute Q and K and RoPE them
  9247. struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  9248. cb(Qcur, "Qcur", il);
  9249. struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
  9250. cb(Kcur, "Kcur", il);
  9251. struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
  9252. cb(Vcur, "Vcur", il);
  9253. Qcur = ggml_rope_ext(
  9254. ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
  9255. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9256. ext_factor, attn_factor, beta_fast, beta_slow
  9257. );
  9258. cb(Qcur, "Qcur", il);
  9259. Kcur = ggml_rope_ext(
  9260. ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
  9261. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9262. ext_factor, attn_factor, beta_fast, beta_slow
  9263. );
  9264. cb(Kcur, "Kcur", il);
  9265. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  9266. model.layers[il].wo, NULL,
  9267. Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
  9268. }
  9269. if (il == n_layer - 1) {
  9270. // skip computing output for unused tokens
  9271. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9272. n_tokens = n_outputs;
  9273. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  9274. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  9275. }
  9276. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  9277. cb(ffn_inp, "ffn_inp", il);
  9278. // feed-forward network
  9279. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  9280. model.layers[il].ffn_norm, NULL,
  9281. LLM_NORM_RMS, cb, il);
  9282. cb(cur, "ffn_norm", il);
  9283. cur = llm_build_ffn(ctx0, cur,
  9284. model.layers[il].ffn_up, NULL,
  9285. model.layers[il].ffn_gate, NULL,
  9286. model.layers[il].ffn_down, NULL,
  9287. NULL,
  9288. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  9289. cb(cur, "ffn_out", il);
  9290. struct ggml_tensor * ffn_out = ggml_add(ctx0, cur, ffn_inp);
  9291. cb(ffn_out, "ffn_out", il);
  9292. // MoE
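// NOTE: Arctic adds a residual MoE branch on top of the dense FFN: the expert
// mixture below is computed from the attention residual (inpSA) under its own
// RMS norm and then summed with ffn_out.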
  9293. cur = llm_build_norm(ctx0, inpSA, hparams,
  9294. model.layers[il].ffn_norm_exps, NULL,
  9295. LLM_NORM_RMS, cb, il);
  9296. cb(cur, "ffn_norm_exps", il);
  9297. cur = llm_build_moe_ffn(ctx0, cur,
  9298. model.layers[il].ffn_gate_inp,
  9299. model.layers[il].ffn_up_exps,
  9300. model.layers[il].ffn_gate_exps,
  9301. model.layers[il].ffn_down_exps,
  9302. n_expert, n_expert_used,
  9303. LLM_FFN_SILU, true,
  9304. false, 0.0,
  9305. cb, il);
  9306. cb(cur, "ffn_moe_out", il);
  9307. cur = ggml_add(ctx0, cur, ffn_out);
  9308. cb(cur, "ffn_out", il);
  9309. ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
  9310. if (layer_dir != nullptr) {
  9311. cur = ggml_add(ctx0, cur, layer_dir);
  9312. }
  9313. cb(cur, "l_out", il);
  9314. // input for next layer
  9315. inpL = cur;
  9316. }
  9317. cur = inpL;
  9318. cur = llm_build_norm(ctx0, cur, hparams,
  9319. model.output_norm, NULL,
  9320. LLM_NORM_RMS, cb, -1);
  9321. cb(cur, "result_norm", -1);
  9322. // lm_head
  9323. cur = ggml_mul_mat(ctx0, model.output, cur);
  9324. cb(cur, "result_output", -1);
  9325. ggml_build_forward_expand(gf, cur);
  9326. return gf;
  9327. }
  9328. struct ggml_cgraph * build_deepseek2() {
  9329. struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
  9330. // mutable variable, needed during the last layer of the computation to skip unused tokens
  9331. int32_t n_tokens = this->n_tokens;
  9332. bool is_lite = (hparams.n_layer == 27);
  9333. // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.
  9334. // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
  9335. const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));
  9336. const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(hparams.n_embd_head_k));
  9337. const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));
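// Illustrative numeric sketch of the pre-scaling above, assuming attn_factor = 1.0f,
// hparams.rope_yarn_log_mul = 0.1f, freq_scale = 0.25f (4x context extension) and
// n_embd_head_k = 192 (all assumed values, not read from the source):
//   mscale             = 1.0f * (1.0f + 0.1f * logf(4.0f))  ~= 1.139f
//   kq_scale           = 1.139f * 1.139f / sqrtf(192.0f)    ~= 0.094f
//   attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(4.0f))  ~= 0.878f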
  9338. const uint32_t n_embd_head_qk_rope = hparams.n_rot;
  9339. const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
  9340. const uint32_t kv_lora_rank = hparams.n_lora_kv;
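// Shape sketch for the split used below, with assumed example values (a DeepSeek-V2-like
// config): n_embd_head_k = 192 and n_rot = 64 give
//   n_embd_head_qk_rope = 64   (per-head slice that receives RoPE)
//   n_embd_head_qk_nope = 128  (per-head slice without positional encoding)
// kv_lora_rank (hparams.n_lora_kv, e.g. 512) is the width of the compressed KV latent that
// wkv_a_mqa produces (alongside the shared RoPE key slice) and wkv_b later expands back.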
  9341. struct ggml_tensor * cur;
  9342. struct ggml_tensor * inpL;
  9343. // {n_embd, n_tokens}
  9344. inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  9345. // inp_pos - contains the positions
  9346. struct ggml_tensor * inp_pos = build_inp_pos();
9347. // KQ_mask (mask for 1 head, it will be broadcast to all heads)
  9348. struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
  9349. for (int il = 0; il < n_layer; ++il) {
  9350. struct ggml_tensor * inpSA = inpL;
  9351. // norm
  9352. cur = llm_build_norm(ctx0, inpL, hparams,
  9353. model.layers[il].attn_norm, NULL,
  9354. LLM_NORM_RMS, cb, il);
  9355. cb(cur, "attn_norm", il);
9356. // self-attention
  9357. {
  9358. struct ggml_tensor * q = NULL;
  9359. if (!is_lite) {
  9360. // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}
  9361. q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
  9362. cb(q, "q", il);
  9363. q = llm_build_norm(ctx0, q, hparams,
  9364. model.layers[il].attn_q_a_norm, NULL,
  9365. LLM_NORM_RMS, cb, il);
  9366. cb(q, "q", il);
  9367. // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}
  9368. q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
  9369. cb(q, "q", il);
  9370. } else {
  9371. q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
  9372. cb(q, "q", il);
  9373. }
  9374. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  9375. struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
  9376. ggml_row_size(q->type, hparams.n_embd_head_k),
  9377. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  9378. 0);
  9379. cb(q_nope, "q_nope", il);
  9380. // and {n_head * n_embd_head_qk_rope, n_tokens}
  9381. struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
  9382. ggml_row_size(q->type, hparams.n_embd_head_k),
  9383. ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
  9384. ggml_row_size(q->type, n_embd_head_qk_nope));
  9385. cb(q_pe, "q_pe", il);
  9386. // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
9387. struct ggml_tensor * kv_pe_compressed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
9388. cb(kv_pe_compressed, "kv_pe_compressed", il);
9389. // split into {kv_lora_rank, n_tokens}
9390. struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compressed, kv_lora_rank, n_tokens,
9391. kv_pe_compressed->nb[1],
9392. 0);
9393. cb(kv_compressed, "kv_compressed", il);
9394. // and {n_embd_head_qk_rope, n_tokens}
9395. struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compressed, n_embd_head_qk_rope, 1, n_tokens,
9396. kv_pe_compressed->nb[1],
9397. kv_pe_compressed->nb[1],
9398. ggml_row_size(kv_pe_compressed->type, kv_lora_rank));
  9399. cb(k_pe, "k_pe", il);
  9400. kv_compressed = ggml_cont(ctx0, kv_compressed); // TODO: the CUDA backend does not support non-contiguous norm
  9401. kv_compressed = llm_build_norm(ctx0, kv_compressed, hparams,
  9402. model.layers[il].attn_kv_a_norm, NULL,
  9403. LLM_NORM_RMS, cb, il);
  9404. cb(kv_compressed, "kv_compressed", il);
  9405. // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
  9406. struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
  9407. cb(kv, "kv", il);
  9408. // split into {n_head * n_embd_head_qk_nope, n_tokens}
  9409. struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
  9410. ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
  9411. ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  9412. 0);
  9413. cb(k_nope, "k_nope", il);
  9414. // and {n_head * n_embd_head_v, n_tokens}
  9415. struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
  9416. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
  9417. ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
  9418. ggml_row_size(kv->type, (n_embd_head_qk_nope)));
  9419. cb(v_states, "v_states", il);
  9420. v_states = ggml_cont(ctx0, v_states);
  9421. cb(v_states, "v_states", il);
  9422. v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
  9423. ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
  9424. 0);
  9425. cb(v_states, "v_states", il);
  9426. q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
  9427. q_pe = ggml_rope_ext(
  9428. ctx0, q_pe, inp_pos, nullptr,
  9429. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9430. ext_factor, attn_factor_scaled, beta_fast, beta_slow
  9431. );
  9432. cb(q_pe, "q_pe", il);
  9433. // shared RoPE key
  9434. k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
  9435. k_pe = ggml_rope_ext(
  9436. ctx0, k_pe, inp_pos, nullptr,
  9437. n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
  9438. ext_factor, attn_factor_scaled, beta_fast, beta_slow
  9439. );
  9440. cb(k_pe, "k_pe", il);
  9441. struct ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
  9442. cb(q_states, "q_states", il);
  9443. struct ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
  9444. cb(k_states, "k_states", il);
  9445. cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
  9446. model.layers[il].wo, NULL,
  9447. k_states, v_states, q_states, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
  9448. }
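// Summary of the tensors fed into llm_build_kv above: per head, q_states = concat(q_nope, q_pe)
// and k_states = concat(k_nope, k_pe), where the single shared k_pe row is broadcast across all
// heads via ggml_repeat; kq_scale already folds in the YaRN mscale correction computed earlier.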
  9449. if (il == n_layer - 1) {
  9450. // skip computing output for unused tokens
  9451. struct ggml_tensor * inp_out_ids = build_inp_out_ids();
  9452. n_tokens = n_outputs;
  9453. cur = ggml_get_rows(ctx0, cur, inp_out_ids);
  9454. inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
  9455. }
  9456. struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
  9457. cb(ffn_inp, "ffn_inp", il);
  9458. if ((uint32_t) il < hparams.n_layer_dense_lead) {
  9459. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  9460. model.layers[il].ffn_norm, NULL,
  9461. LLM_NORM_RMS, cb, il);
  9462. cb(cur, "ffn_norm", il);
  9463. cur = llm_build_ffn(ctx0, cur,
  9464. model.layers[il].ffn_up, NULL,
  9465. model.layers[il].ffn_gate, NULL,
  9466. model.layers[il].ffn_down, NULL,
  9467. NULL,
  9468. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  9469. cb(cur, "ffn_out", il);
  9470. } else {
  9471. // MoE branch
  9472. cur = llm_build_norm(ctx0, ffn_inp, hparams,
  9473. model.layers[il].ffn_norm, NULL,
  9474. LLM_NORM_RMS, cb, il);
  9475. cb(cur, "ffn_norm", il);
  9476. ggml_tensor * moe_out =
  9477. llm_build_moe_ffn(ctx0, cur,
  9478. model.layers[il].ffn_gate_inp,
  9479. model.layers[il].ffn_up_exps,
  9480. model.layers[il].ffn_gate_exps,
  9481. model.layers[il].ffn_down_exps,
  9482. n_expert, n_expert_used,
  9483. LLM_FFN_SILU, false,
  9484. true, hparams.expert_weights_scale,
  9485. cb, il);
  9486. cb(moe_out, "ffn_moe_out", il);
  9487. // FFN shared expert
  9488. {
  9489. ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, cur,
  9490. model.layers[il].ffn_up_shexp, NULL,
  9491. model.layers[il].ffn_gate_shexp, NULL,
  9492. model.layers[il].ffn_down_shexp, NULL,
  9493. NULL,
  9494. LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
  9495. cb(ffn_shexp, "ffn_shexp", il);
  9496. cur = ggml_add(ctx0, moe_out, ffn_shexp);
  9497. cb(cur, "ffn_out", il);
  9498. }
  9499. }
  9500. cur = ggml_add(ctx0, cur, ffn_inp);
  9501. cb(cur, "l_out", il);
  9502. // input for next layer
  9503. inpL = cur;
  9504. }
  9505. cur = inpL;
  9506. cur = llm_build_norm(ctx0, cur, hparams,
  9507. model.output_norm, NULL,
  9508. LLM_NORM_RMS, cb, -1);
  9509. cb(cur, "result_norm", -1);
  9510. // lm_head
  9511. cur = ggml_mul_mat(ctx0, model.output, cur);
  9512. cb(cur, "result_output", -1);
  9513. ggml_build_forward_expand(gf, cur);
  9514. return gf;
  9515. }
  9516. };
  9517. static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) {
  9518. llama_batch dummy;
  9519. dummy.n_tokens = 0;
  9520. llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
  9521. struct llm_build_context llm(lctx, dummy, cb, false);
  9522. llm.init();
  9523. struct ggml_cgraph * result = llm.build_defrag(ids);
  9524. llm.free();
  9525. return result;
  9526. }
  9527. static struct ggml_cgraph * llama_build_graph_k_shift(llama_context & lctx) {
  9528. llama_batch dummy;
  9529. dummy.n_tokens = 0;
  9530. llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
  9531. struct llm_build_context llm(lctx, dummy, cb, false);
  9532. llm.init();
  9533. struct ggml_cgraph * result = llm.build_k_shift();
  9534. llm.free();
  9535. return result;
  9536. }
  9537. static struct ggml_cgraph * llama_build_graph_s_copy(llama_context & lctx) {
  9538. llama_batch dummy;
  9539. dummy.n_tokens = 0;
  9540. llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
  9541. struct llm_build_context llm(lctx, dummy, cb, false);
  9542. llm.init();
  9543. struct ggml_cgraph * result = llm.build_s_copy();
  9544. llm.free();
  9545. return result;
  9546. }
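// Minimal usage sketch for the helper graph builders above (this mirrors how
// llama_kv_cache_update_internal drives them further below; shown here only as an illustration):
//
//   ggml_backend_sched_reset(lctx.sched);
//   ggml_cgraph * gf = llama_build_graph_k_shift(lctx);
//   ggml_backend_sched_alloc_graph(lctx.sched, gf);
//   llama_set_k_shift(lctx);                                // fill inp_K_shift with per-cell deltas
//   llama_graph_compute(lctx, gf, lctx.cparams.n_threads);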
  9547. static struct ggml_cgraph * llama_build_graph(
  9548. llama_context & lctx,
  9549. const llama_batch & batch,
  9550. bool worst_case) {
  9551. const auto & model = lctx.model;
  9552. // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
  9553. llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
  9554. if (il >= 0) {
  9555. ggml_format_name(cur, "%s-%d", name, il);
  9556. } else {
  9557. ggml_set_name(cur, name);
  9558. }
  9559. if (!lctx.cparams.offload_kqv) {
  9560. if (strcmp(name, "kqv_merged_cont") == 0) {
  9561. // all nodes between the KV store and the attention output are run on the CPU
  9562. ggml_backend_sched_set_tensor_backend(lctx.sched, cur, lctx.backend_cpu);
  9563. }
  9564. }
  9565. // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
  9566. // FIXME: fix in ggml_backend_sched
  9567. const bool full_offload = lctx.model.n_gpu_layers > (int)lctx.model.hparams.n_layer;
  9568. if (batch.n_tokens < 32 || full_offload) {
  9569. if (il != -1 && strcmp(name, "norm") == 0) {
  9570. for (auto * backend : lctx.backends) {
  9571. if (ggml_backend_buft_supports_backend(lctx.model.buft_layer[il].buft, backend)) {
  9572. ggml_backend_sched_set_tensor_backend(lctx.sched, cur, backend);
  9573. break;
  9574. }
  9575. }
  9576. }
  9577. }
  9578. };
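// For example, a tensor passed to cb with name "ffn_norm" at il == 5 ends up named "ffn_norm-5",
// while result tensors passed with il == -1 (e.g. "result_output") keep their plain name.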
  9579. struct ggml_cgraph * result = NULL;
  9580. struct llm_build_context llm(lctx, batch, cb, worst_case);
  9581. llm.init();
  9582. switch (model.arch) {
  9583. case LLM_ARCH_LLAMA:
  9584. {
  9585. result = llm.build_llama();
  9586. } break;
  9587. case LLM_ARCH_BAICHUAN:
  9588. {
  9589. result = llm.build_baichuan();
  9590. } break;
  9591. case LLM_ARCH_FALCON:
  9592. {
  9593. result = llm.build_falcon();
  9594. } break;
  9595. case LLM_ARCH_GROK:
  9596. {
  9597. result = llm.build_grok();
  9598. } break;
  9599. case LLM_ARCH_STARCODER:
  9600. {
  9601. result = llm.build_starcoder();
  9602. } break;
  9603. case LLM_ARCH_REFACT:
  9604. {
  9605. result = llm.build_refact();
  9606. } break;
  9607. case LLM_ARCH_BERT:
  9608. case LLM_ARCH_JINA_BERT_V2:
  9609. case LLM_ARCH_NOMIC_BERT:
  9610. {
  9611. result = llm.build_bert();
  9612. } break;
  9613. case LLM_ARCH_BLOOM:
  9614. {
  9615. result = llm.build_bloom();
  9616. } break;
  9617. case LLM_ARCH_MPT:
  9618. {
  9619. result = llm.build_mpt();
  9620. } break;
  9621. case LLM_ARCH_STABLELM:
  9622. {
  9623. result = llm.build_stablelm();
  9624. } break;
  9625. case LLM_ARCH_QWEN:
  9626. {
  9627. result = llm.build_qwen();
  9628. } break;
  9629. case LLM_ARCH_QWEN2:
  9630. {
  9631. result = llm.build_qwen2();
  9632. } break;
  9633. case LLM_ARCH_QWEN2MOE:
  9634. {
  9635. result = llm.build_qwen2moe();
  9636. } break;
  9637. case LLM_ARCH_PHI2:
  9638. {
  9639. result = llm.build_phi2();
  9640. } break;
  9641. case LLM_ARCH_PHI3:
  9642. {
  9643. result = llm.build_phi3();
  9644. } break;
  9645. case LLM_ARCH_PLAMO:
  9646. {
  9647. result = llm.build_plamo();
  9648. } break;
  9649. case LLM_ARCH_GPT2:
  9650. {
  9651. result = llm.build_gpt2();
  9652. } break;
  9653. case LLM_ARCH_CODESHELL:
  9654. {
  9655. result = llm.build_codeshell();
  9656. } break;
  9657. case LLM_ARCH_ORION:
  9658. {
  9659. result = llm.build_orion();
  9660. } break;
  9661. case LLM_ARCH_INTERNLM2:
  9662. {
  9663. result = llm.build_internlm2();
  9664. } break;
  9665. case LLM_ARCH_MINICPM:
  9666. {
  9667. result = llm.build_minicpm();
  9668. } break;
  9669. case LLM_ARCH_GEMMA:
  9670. {
  9671. result = llm.build_gemma();
  9672. } break;
  9673. case LLM_ARCH_STARCODER2:
  9674. {
  9675. result = llm.build_starcoder2();
  9676. } break;
  9677. case LLM_ARCH_MAMBA:
  9678. {
  9679. result = llm.build_mamba();
  9680. } break;
  9681. case LLM_ARCH_XVERSE:
  9682. {
  9683. result = llm.build_xverse();
  9684. } break;
  9685. case LLM_ARCH_COMMAND_R:
  9686. {
  9687. result = llm.build_command_r();
  9688. } break;
  9689. case LLM_ARCH_DBRX:
  9690. {
  9691. result = llm.build_dbrx();
  9692. } break;
  9693. case LLM_ARCH_OLMO:
  9694. {
  9695. result = llm.build_olmo();
  9696. } break;
  9697. case LLM_ARCH_GPTNEOX:
  9698. {
  9699. result = llm.build_gptneox();
  9700. } break;
  9701. case LLM_ARCH_ARCTIC:
  9702. {
  9703. result = llm.build_arctic();
  9704. } break;
  9705. case LLM_ARCH_DEEPSEEK2:
  9706. {
  9707. result = llm.build_deepseek2();
  9708. } break;
  9709. default:
  9710. GGML_ASSERT(false);
  9711. }
  9712. llm.free();
  9713. return result;
  9714. }
  9715. static void llama_set_k_shift(llama_context & lctx) {
  9716. const int64_t kv_size = lctx.kv_self.size;
  9717. assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer));
  9718. int32_t * data = (int32_t *) lctx.inp_K_shift->data;
  9719. for (int i = 0; i < kv_size; ++i) {
  9720. data[i] = lctx.kv_self.cells[i].delta;
  9721. }
  9722. }
  9723. static void llama_set_s_copy(llama_context & lctx) {
  9724. const int64_t kv_size = lctx.kv_self.size;
  9725. assert(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer));
  9726. int32_t * data = (int32_t *) lctx.inp_s_copy->data;
  9727. for (int i = 0; i < kv_size; ++i) {
  9728. data[i] = lctx.kv_self.cells[i].src;
  9729. }
  9730. }
  9731. static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
  9732. //
  9733. // set input data
  9734. //
  9735. const auto & hparams = lctx.model.hparams;
  9736. const auto & cparams = lctx.cparams;
  9737. const auto & kv_self = lctx.kv_self;
  9738. if (batch.token) {
  9739. const int64_t n_tokens = batch.n_tokens;
  9740. ggml_backend_tensor_set(lctx.inp_tokens, batch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens));
  9741. }
  9742. if (batch.embd) {
  9743. const int64_t n_embd = hparams.n_embd;
  9744. const int64_t n_tokens = batch.n_tokens;
  9745. ggml_backend_tensor_set(lctx.inp_embd, batch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
  9746. }
  9747. if (batch.pos && lctx.inp_pos) {
  9748. const int64_t n_tokens = batch.n_tokens;
  9749. ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
  9750. }
  9751. if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
  9752. GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs");
  9753. const int64_t n_tokens = batch.n_tokens;
  9754. GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer));
  9755. int32_t * data = (int32_t *) lctx.inp_out_ids->data;
  9756. if (lctx.n_outputs == n_tokens) {
  9757. for (int i = 0; i < n_tokens; ++i) {
  9758. data[i] = i;
  9759. }
  9760. } else if (batch.logits) {
  9761. int32_t n_outputs = 0;
  9762. for (int i = 0; i < n_tokens; ++i) {
  9763. if (batch.logits[i]) {
  9764. data[n_outputs++] = i;
  9765. }
  9766. }
  9767. // the graph needs to have been passed the correct number of outputs
  9768. GGML_ASSERT(lctx.n_outputs == n_outputs);
  9769. } else if (lctx.n_outputs == 1) {
  9770. // only keep last output
  9771. data[0] = n_tokens - 1;
  9772. } else {
  9773. GGML_ASSERT(lctx.n_outputs == 0);
  9774. }
  9775. }
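// Small worked example (assumed batch): n_tokens = 4 and batch.logits = {0, 0, 1, 1}
// gives lctx.n_outputs = 2 and inp_out_ids data = {2, 3}, so the ggml_get_rows calls in the
// graph keep only the rows of the last two tokens.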
  9776. GGML_ASSERT(
  9777. // (!a || b) is a logical implication (a -> b)
  9778. // !hparams.causal_attn -> !cparams.causal_attn
  9779. (hparams.causal_attn || !cparams.causal_attn) &&
  9780. "causal attention with embedding models is not supported"
  9781. );
  9782. if (lctx.inp_KQ_mask) {
  9783. // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
  9784. if (cparams.causal_attn) {
  9785. const int64_t n_kv = kv_self.n;
  9786. const int64_t n_tokens = batch.n_tokens;
  9787. GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
  9788. float * data = (float *) lctx.inp_KQ_mask->data;
  9789. // For causal attention, use only the previous KV cells
  9790. // of the correct sequence for each token of the batch.
  9791. // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
  9792. for (int h = 0; h < 1; ++h) {
  9793. for (int j = 0; j < n_tokens; ++j) {
  9794. const llama_pos pos = batch.pos[j];
  9795. const llama_seq_id seq_id = batch.seq_id[j][0];
  9796. for (int i = 0; i < n_kv; ++i) {
  9797. float f;
  9798. if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
  9799. f = -INFINITY;
  9800. } else {
  9801. if (hparams.use_alibi) {
  9802. f = -fabs(lctx.kv_self.cells[i].pos - pos);
  9803. } else {
  9804. f = 0.0f;
  9805. }
  9806. }
  9807. data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
  9808. }
  9809. }
  9810. for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
  9811. for (int j = 0; j < n_kv; ++j) {
  9812. data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
  9813. }
  9814. }
  9815. }
  9816. } else {
  9817. // when using kv cache, the mask needs to match the kv cache size
  9818. const int64_t n_tokens = batch.n_tokens;
  9819. const int64_t n_stride = hparams.causal_attn ? kv_self.n : n_tokens;
  9820. GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
  9821. float * data = (float *) lctx.inp_KQ_mask->data;
  9822. for (int h = 0; h < 1; ++h) {
  9823. for (int j = 0; j < n_tokens; ++j) {
  9824. const llama_seq_id seq_id = batch.seq_id[j][0];
  9825. for (int i = 0; i < n_tokens; ++i) {
  9826. float f = -INFINITY;
  9827. for (int s = 0; s < batch.n_seq_id[i]; ++s) {
  9828. if (batch.seq_id[i][s] == seq_id) {
  9829. if (hparams.use_alibi) {
  9830. f = -fabs(batch.pos[i] - batch.pos[j]);
  9831. } else {
  9832. f = 0.0f;
  9833. }
  9834. break;
  9835. }
  9836. }
  9837. data[h*(n_tokens*n_tokens) + j*n_stride + i] = f;
  9838. }
  9839. for (int i = n_tokens; i < n_stride; ++i) {
  9840. data[h*(n_tokens*n_tokens) + j*n_stride + i] = -INFINITY;
  9841. }
  9842. }
  9843. }
  9844. }
  9845. }
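// Illustrative causal mask, assuming a single sequence with n_tokens = 3 just written to
// cells 0-2 at positions 0-2, and n_kv = 4 (cell 3 unused); rows are batch tokens j,
// columns are KV cells i:
//   row 0:  0, -INF, -INF, -INF
//   row 1:  0,    0, -INF, -INF
//   row 2:  0,    0,    0, -INF
// (with hparams.use_alibi the zeros become -|pos_i - pos_j| instead)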
  9846. if (cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
  9847. const int64_t n_tokens = batch.n_tokens;
  9848. GGML_ASSERT(lctx.inp_mean);
  9849. GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
  9850. float * data = (float *) lctx.inp_mean->data;
  9851. memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));
  9852. std::vector<uint64_t> sum(n_tokens, 0);
  9853. for (int i = 0; i < n_tokens; ++i) {
  9854. const llama_seq_id seq_id = batch.seq_id[i][0];
  9855. GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
  9856. sum[seq_id] += 1;
  9857. }
  9858. std::vector<float> div(n_tokens, 0.0f);
  9859. for (int i = 0; i < n_tokens; ++i) {
  9860. const uint64_t s = sum[i];
  9861. if (s > 0) {
  9862. div[i] = 1.0f/float(s);
  9863. }
  9864. }
  9865. for (int i = 0; i < n_tokens; ++i) {
  9866. const llama_seq_id seq_id = batch.seq_id[i][0];
  9867. data[seq_id*n_tokens + i] = div[seq_id];
  9868. }
  9869. }
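// Example (assumed batch): two sequences, seq 0 with tokens {0, 1} and seq 1 with token {2},
// n_tokens = 3. Then sum = {2, 1, 0} and div = {0.5, 1.0, 0}, so inp_mean row 0 holds 0.5 at
// columns 0 and 1, and row 1 holds 1.0 at column 2; multiplying this matrix by the token
// embeddings yields the per-sequence mean embeddings.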
  9870. if (cparams.pooling_type == LLAMA_POOLING_TYPE_CLS) {
  9871. const int64_t n_tokens = batch.n_tokens;
  9872. GGML_ASSERT(lctx.inp_cls);
  9873. GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
  9874. uint32_t * data = (uint32_t *) lctx.inp_cls->data;
  9875. memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
  9876. for (int i = 0; i < n_tokens; ++i) {
  9877. const llama_seq_id seq_id = batch.seq_id[i][0];
  9878. const llama_pos pos = batch.pos[i];
  9879. GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS");
  9880. if (pos == 0) {
  9881. data[seq_id] = i;
  9882. }
  9883. }
  9884. }
  9885. if (kv_self.recurrent) {
  9886. const int64_t n_kv = kv_self.n;
  9887. if (lctx.inp_s_mask) {
  9888. GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer));
  9889. float * data = (float *) lctx.inp_s_mask->data;
  9890. // states which are not affected by the current batch are left untouched
  9891. for (int i = 0; i < n_kv; ++i) {
  9892. llama_seq_id seq_id = i + lctx.kv_self.head;
  9893. llama_kv_cell & kv_cell = lctx.kv_self.cells[seq_id];
  9894. bool has_self_seq = kv_cell.has_seq_id(seq_id);
  9895. data[i] = (float) has_self_seq;
  9896. // ensure current sequences will be kept
  9897. if (!has_self_seq && kv_cell.pos >= 0) {
  9898. kv_cell.seq_id.insert(seq_id);
  9899. }
  9900. }
  9901. }
  9902. // For Mamba (and other recurrent architectures),
  9903. // update the correct state(s)/sequence(s) for each token of the batch.
  9904. // Like with the KQ_mask, if a token in the batch has multiple sequences,
  9905. // they are assumed to be equivalent (not here, but in ggml_ssm_scan and ggml_ssm_conv).
  9906. if (lctx.inp_s_seq) {
  9907. const int64_t n_tokens = batch.n_tokens;
  9908. GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_seq->buffer));
  9909. int32_t * data = (int32_t *) lctx.inp_s_seq->data;
  9910. for (int j = 0; j < n_tokens; ++j) {
  9911. const int32_t n_seq = batch.n_seq_id[j];
  9912. GGML_ASSERT(0 < n_seq); // a token should be part of at least 1 sequence
  9913. for (int i = 0; i < n_kv; ++i) {
  9914. if (i < n_seq) {
  9915. // for this type of model, the head is the minimum seq_id of the batch
  9916. data[j*n_kv + i] = batch.seq_id[j][i] - kv_self.head;
  9917. } else {
  9918. data[j*n_kv + i] = -1;
  9919. }
  9920. }
  9921. }
  9922. }
  9923. }
  9924. }
  9925. // Make sure enough space is available for outputs.
  9926. // Returns max number of outputs for which space was reserved.
  9927. static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
  9928. const auto & cparams = lctx.cparams;
  9929. const auto & hparams = lctx.model.hparams;
  9930. const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max);
  9931. const auto n_batch = cparams.n_batch;
  9932. const auto n_vocab = hparams.n_vocab;
  9933. const auto n_embd = hparams.n_embd;
  9934. // TODO: use a per-batch flag for logits presence instead
  9935. const bool has_logits = cparams.causal_attn;
  9936. const bool has_embd = cparams.embeddings && (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);
  9937. const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
  9938. const size_t embd_size = has_embd ? n_embd*n_outputs_max : 0;
  9939. if (lctx.output_ids.empty()) {
  9940. // init, never resized afterwards
  9941. lctx.output_ids.resize(n_batch);
  9942. }
  9943. const size_t prev_size = lctx.buf_output ? ggml_backend_buffer_get_size(lctx.buf_output) : 0;
  9944. const size_t new_size = (logits_size + embd_size) * sizeof(float);
  9945. // alloc only when more than the current capacity is required
  9946. // TODO: also consider shrinking the buffer
  9947. if (!lctx.buf_output || prev_size < new_size) {
  9948. if (lctx.buf_output) {
  9949. #ifndef NDEBUG
  9950. // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark)
  9951. LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
  9952. #endif
  9953. ggml_backend_buffer_free(lctx.buf_output);
  9954. lctx.buf_output = nullptr;
  9955. lctx.logits = nullptr;
  9956. lctx.embd = nullptr;
  9957. }
  9958. lctx.buf_output = ggml_backend_buft_alloc_buffer(llama_default_buffer_type_cpu(true), new_size);
  9959. if (lctx.buf_output == nullptr) {
  9960. LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0));
  9961. return 0;
  9962. }
  9963. }
  9964. float * output_base = (float *) ggml_backend_buffer_get_base(lctx.buf_output);
  9965. lctx.logits = has_logits ? output_base : nullptr;
  9966. lctx.embd = has_embd ? output_base + logits_size : nullptr;
  9967. lctx.output_size = n_outputs_max;
  9968. lctx.logits_size = logits_size;
  9969. lctx.embd_size = embd_size;
  9970. // set all ids as invalid (negative)
  9971. std::fill(lctx.output_ids.begin(), lctx.output_ids.end(), -1);
  9972. ggml_backend_buffer_clear(lctx.buf_output, 0);
  9973. lctx.n_outputs = 0;
  9974. return n_outputs_max;
  9975. }
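// Resulting buffer layout, as a sketch: buf_output holds [ logits: n_vocab * n_outputs_max floats ]
// followed by [ embd: n_embd * n_outputs_max floats ]; lctx.logits points at the base and lctx.embd
// at output_base + logits_size (either pointer is nullptr when that kind of output is disabled).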
  9976. static void llama_graph_compute(
  9977. llama_context & lctx,
  9978. ggml_cgraph * gf,
  9979. int n_threads) {
  9980. #ifdef GGML_USE_METAL
  9981. if (ggml_backend_is_metal(lctx.backend_metal)) {
  9982. ggml_backend_metal_set_n_cb(lctx.backend_metal, n_threads);
  9983. }
  9984. #endif
  9985. if (lctx.backend_cpu != nullptr) {
  9986. ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads);
  9987. ggml_backend_cpu_set_abort_callback(lctx.backend_cpu, lctx.abort_callback, lctx.abort_callback_data);
  9988. }
  9989. ggml_backend_sched_graph_compute_async(lctx.sched, gf);
  9990. // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched));
  9991. }
  9992. // decode a batch of tokens by evaluating the transformer
  9993. //
  9994. // - lctx: llama context
  9995. // - batch: batch to evaluate
  9996. //
  9997. // return 0 on success
  9998. // return positive int on warning
  9999. // return negative int on error
  10000. //
  10001. static int llama_decode_internal(
  10002. llama_context & lctx,
  10003. llama_batch batch_all) { // TODO: rename back to batch
  10004. const uint32_t n_tokens_all = batch_all.n_tokens;
  10005. if (n_tokens_all == 0) {
10006. LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
  10007. return -1;
  10008. }
  10009. const auto & model = lctx.model;
  10010. const auto & hparams = model.hparams;
  10011. const auto & cparams = lctx.cparams;
  10012. GGML_ASSERT((!batch_all.token && batch_all.embd) || (batch_all.token && !batch_all.embd)); // NOLINT
  10013. GGML_ASSERT(n_tokens_all <= cparams.n_batch);
  10014. GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");
  10015. if (lctx.t_compute_start_us == 0) {
  10016. lctx.t_compute_start_us = ggml_time_us();
  10017. }
  10018. lctx.n_queued_tokens += n_tokens_all;
  10019. auto & kv_self = lctx.kv_self;
  10020. const int64_t n_embd = hparams.n_embd;
  10021. const int64_t n_vocab = hparams.n_vocab;
  10022. uint32_t n_outputs = 0;
  10023. uint32_t n_outputs_prev = 0;
  10024. const auto n_ubatch = cparams.n_ubatch;
  10025. std::vector<llama_pos> pos;
  10026. std::vector<int32_t> n_seq_id;
  10027. std::vector<llama_seq_id *> seq_id_arr;
  10028. std::vector<std::vector<llama_seq_id>> seq_id;
  10029. // count outputs
  10030. if (batch_all.logits) {
  10031. for (uint32_t i = 0; i < n_tokens_all; ++i) {
  10032. n_outputs += batch_all.logits[i] != 0;
  10033. }
  10034. } else if (lctx.logits_all || (cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE)) {
  10035. n_outputs = n_tokens_all;
  10036. } else {
  10037. // keep last output only
  10038. n_outputs = 1;
  10039. }
  10040. // reserve output buffer
  10041. if (llama_output_reserve(lctx, n_outputs) < n_outputs) {
  10042. LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_outputs);
  10043. return -2;
10044. }
  10045. // set output mappings
  10046. if (batch_all.logits) {
  10047. int32_t i_logits = 0;
  10048. for (uint32_t i = 0; i < n_tokens_all; ++i) {
  10049. if (batch_all.logits[i]) {
  10050. lctx.output_ids[i] = i_logits++;
  10051. }
  10052. }
  10053. } else {
  10054. for (uint32_t i = 0; i < n_outputs; ++i) {
  10055. lctx.output_ids[i] = i;
  10056. }
  10057. }
  10058. for (uint32_t cur_token = 0; cur_token < n_tokens_all; cur_token += n_ubatch) {
  10059. const uint32_t n_tokens = std::min(n_ubatch, n_tokens_all - cur_token);
  10060. llama_batch u_batch = {
  10061. /* .n_tokens = */ (int32_t) n_tokens,
  10062. /* .token = */ batch_all.token ? batch_all.token + cur_token : nullptr,
  10063. /* .embd = */ batch_all.embd ? batch_all.embd + cur_token*n_embd : nullptr,
  10064. /* .pos = */ batch_all.pos ? batch_all.pos + cur_token : nullptr,
  10065. /* .n_seq_id = */ batch_all.n_seq_id ? batch_all.n_seq_id + cur_token : nullptr,
  10066. /* .seq_id = */ batch_all.seq_id ? batch_all.seq_id + cur_token : nullptr,
  10067. /* .logits = */ batch_all.logits ? batch_all.logits + cur_token : nullptr,
  10068. /* .all_pos_0 = */ batch_all.all_pos_0 + (llama_pos) cur_token*batch_all.all_pos_1,
  10069. /* .all_pos_1 = */ batch_all.all_pos_1,
  10070. /* .all_seq_id = */ batch_all.all_seq_id,
  10071. };
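// Example of the micro-batch split, assuming n_tokens_all = 9 and cparams.n_ubatch = 4:
// this loop runs three times with n_tokens = 4, 4 and 1, each u_batch viewing a window of
// the token/embd/pos/seq_id/logits arrays starting at cur_token.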
  10072. // count the outputs in this u_batch
  10073. {
  10074. int32_t n_outputs_new = 0;
  10075. if (u_batch.logits) {
  10076. for (uint32_t i = 0; i < n_tokens; i++) {
  10077. n_outputs_new += u_batch.logits[i] != 0;
  10078. }
  10079. } else if (n_outputs == n_tokens_all) {
  10080. n_outputs_new = n_tokens;
  10081. } else {
  10082. // keep last output only
  10083. if (cur_token + n_tokens >= n_tokens_all) {
  10084. n_outputs_new = 1;
  10085. }
  10086. }
  10087. // needs to happen before the graph is built
  10088. lctx.n_outputs = n_outputs_new;
  10089. }
  10090. int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
  10091. GGML_ASSERT(n_threads > 0);
  10092. // helpers for smoother batch API transition
  10093. // after deprecating the llama_eval calls, these will be removed
  10094. if (u_batch.pos == nullptr) {
  10095. pos.resize(n_tokens);
  10096. for (uint32_t i = 0; i < n_tokens; i++) {
  10097. pos[i] = u_batch.all_pos_0 + i*u_batch.all_pos_1;
  10098. }
  10099. u_batch.pos = pos.data();
  10100. }
  10101. if (u_batch.seq_id == nullptr) {
  10102. n_seq_id.resize(n_tokens);
  10103. seq_id.resize(n_tokens);
  10104. seq_id_arr.resize(n_tokens);
  10105. for (uint32_t i = 0; i < n_tokens; i++) {
  10106. n_seq_id[i] = 1;
  10107. seq_id[i].resize(1);
  10108. seq_id[i][0] = u_batch.all_seq_id;
  10109. seq_id_arr[i] = seq_id[i].data();
  10110. }
  10111. u_batch.n_seq_id = n_seq_id.data();
  10112. u_batch.seq_id = seq_id_arr.data();
  10113. }
  10114. // non-causal masks do not use the KV cache
  10115. if (hparams.causal_attn) {
  10116. llama_kv_cache_update(&lctx);
  10117. // if we have enough unused cells before the current head ->
  10118. // better to start searching from the beginning of the cache, hoping to fill it
  10119. if (kv_self.head > kv_self.used + 2*n_tokens) {
  10120. kv_self.head = 0;
  10121. }
  10122. if (!llama_kv_cache_find_slot(kv_self, u_batch)) {
  10123. return 1;
  10124. }
  10125. if (!kv_self.recurrent) {
  10126. // a heuristic, to avoid attending the full cache if it is not yet utilized
  10127. // after enough generations, the benefit from this heuristic disappears
  10128. // if we start defragmenting the cache, the benefit from this will be more important
  10129. const uint32_t pad = llama_kv_cache_get_padding(cparams);
  10130. kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad)));
  10131. //kv_self.n = llama_kv_cache_cell_max(kv_self);
  10132. }
  10133. }
  10134. //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);
  10135. ggml_backend_sched_reset(lctx.sched);
  10136. ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);
  10137. ggml_cgraph * gf = llama_build_graph(lctx, u_batch, false);
  10138. // the output is always the last tensor in the graph
  10139. struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
  10140. struct ggml_tensor * embd = gf->nodes[gf->n_nodes - 2];
  10141. if (lctx.n_outputs == 0) {
  10142. // no output
  10143. res = nullptr;
  10144. embd = nullptr;
  10145. } else if (!hparams.causal_attn) {
  10146. res = nullptr; // do not extract logits for embedding models such as BERT
  10147. // token or sequence embeddings
  10148. embd = gf->nodes[gf->n_nodes - 1];
  10149. GGML_ASSERT(strcmp(embd->name, "result_embd") == 0 || strcmp(embd->name, "result_embd_pooled") == 0);
  10150. } else if (cparams.embeddings) {
  10151. // the embeddings could be in the second to last tensor, or any of the previous tensors
  10152. int i_embd = gf->n_nodes - 2;
  10153. for (int i = 3; strcmp(embd->name, "result_norm") != 0; ++i) {
  10154. i_embd = gf->n_nodes - i;
  10155. if (i_embd < 0) { break; }
  10156. embd = gf->nodes[i_embd];
  10157. }
  10158. GGML_ASSERT(i_embd >= 0 && "missing result_norm tensor");
  10159. // TODO: use a per-batch flag to know when to skip logits while keeping embeddings
  10160. if (!cparams.causal_attn) {
  10161. res = nullptr; // do not extract logits when not needed
  10162. // skip computing logits
  10163. // TODO: is this safe?
  10164. gf->n_nodes = i_embd + 1;
  10165. }
  10166. } else {
  10167. embd = nullptr; // do not extract embeddings when not needed
  10168. GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
  10169. }
  10170. // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
  10171. // for big prompts, if BLAS is enabled, it is better to use only one thread
  10172. // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
  10173. // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
  10174. // we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
  10175. // with the BLAS calls. need a better solution
10176. // MoE special case: this cap is applied only when hparams.n_expert == 0, i.e. the model is NOT an MoE model. When an MoE
10177. // model is being processed, Accelerate/BLAS is not involved, so capping the thread count would only hurt performance.
  10178. if (n_tokens >= 32 && hparams.n_expert == 0 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
  10179. n_threads = std::min(4, n_threads);
  10180. }
  10181. ggml_backend_sched_alloc_graph(lctx.sched, gf);
  10182. llama_set_inputs(lctx, u_batch);
  10183. llama_graph_compute(lctx, gf, n_threads);
  10184. // update the kv ring buffer
  10185. {
  10186. kv_self.head += n_tokens;
  10187. // Ensure kv cache head points to a valid index.
  10188. if (kv_self.head >= kv_self.size) {
  10189. kv_self.head = 0;
  10190. }
  10191. }
  10192. #ifdef GGML_PERF
  10193. // print timing information per ggml operation (for debugging purposes)
  10194. // requires GGML_PERF to be defined
  10195. ggml_graph_print(gf);
  10196. #endif
  10197. // plot the computation graph in dot format (for debugging purposes)
  10198. //if (n_past%100 == 0) {
  10199. // ggml_graph_dump_dot(gf, NULL, "llama.dot");
  10200. //}
  10201. // extract logits
  10202. if (res) {
  10203. ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(lctx.sched, res);
  10204. GGML_ASSERT(backend_res != nullptr);
  10205. GGML_ASSERT(lctx.logits != nullptr);
  10206. float * logits_out = lctx.logits + n_outputs_prev*n_vocab;
  10207. const int32_t n_outputs_new = lctx.n_outputs;
  10208. if (n_outputs_new) {
  10209. GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs);
  10210. GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_vocab <= (int64_t) lctx.logits_size);
  10211. ggml_backend_tensor_get_async(backend_res, res, logits_out, 0, n_outputs_new*n_vocab*sizeof(float));
  10212. }
  10213. }
  10214. // extract embeddings
  10215. if (embd) {
  10216. ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched, embd);
  10217. GGML_ASSERT(backend_embd != nullptr);
  10218. switch (cparams.pooling_type) {
  10219. case LLAMA_POOLING_TYPE_NONE:
  10220. {
  10221. // extract token embeddings
  10222. GGML_ASSERT(lctx.embd != nullptr);
  10223. float * embd_out = lctx.embd + n_outputs_prev*n_embd;
  10224. const int32_t n_outputs_new = lctx.n_outputs;
  10225. if (n_outputs_new) {
  10226. GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs);
  10227. GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_embd <= (int64_t) lctx.embd_size);
  10228. ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_outputs_new*n_embd*sizeof(float));
  10229. }
  10230. } break;
  10231. case LLAMA_POOLING_TYPE_CLS:
  10232. case LLAMA_POOLING_TYPE_MEAN:
  10233. {
  10234. GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0);
  10235. // extract sequence embeddings
  10236. auto & embd_seq_out = lctx.embd_seq;
  10237. embd_seq_out.clear();
  10238. for (uint32_t i = 0; i < n_tokens; i++) {
  10239. const llama_seq_id seq_id = u_batch.seq_id[i][0];
  10240. if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
  10241. continue;
  10242. }
  10243. embd_seq_out[seq_id].resize(n_embd);
  10244. ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
  10245. }
  10246. } break;
  10247. case LLAMA_POOLING_TYPE_UNSPECIFIED:
  10248. {
  10249. GGML_ASSERT(false && "unknown pooling type");
  10250. } break;
  10251. }
  10252. }
  10253. n_outputs_prev += lctx.n_outputs;
  10254. }
  10255. // set to total number of outputs in the batch, for use in llama_get_logits_ith
  10256. lctx.n_outputs = n_outputs;
  10257. // wait for the computation to finish (automatically done when obtaining the model output)
  10258. //llama_synchronize(&lctx);
  10259. // decide if we need to defrag the kv cache
  10260. if (cparams.causal_attn && cparams.defrag_thold >= 0.0f) {
  10261. const float fragmentation = kv_self.n >= 128 ? 1.0f - float(kv_self.used)/float(kv_self.n) : 0.0f;
  10262. // queue defragmentation for next llama_kv_cache_update
  10263. if (fragmentation > cparams.defrag_thold) {
  10264. //LLAMA_LOG_INFO("fragmentation: %.2f\n", fragmentation);
  10265. llama_kv_cache_defrag(kv_self);
  10266. }
  10267. }
  10268. // Reset state for the next token before backend sync, to allow the CPU activities in the reset to
  10269. // overlap with device computation.
  10270. ggml_backend_sched_reset(lctx.sched);
  10271. return 0;
  10272. }
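// Minimal caller-side sketch (assuming the public llama_decode wrapper forwards the return value
// of this function, per the return-code convention documented above):
//
//   const int ret = llama_decode(ctx, batch);
//   if (ret < 0) {
//       // hard error (e.g. empty batch or failed output buffer reservation)
//   } else if (ret > 0) {
//       // warning: no KV cache slot found for the batch - retry with a smaller batch or free cache space
//   }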
  10273. // find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
  10274. static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
  10275. auto & kv_self = lctx.kv_self;
  10276. const auto & hparams = lctx.model.hparams;
  10277. const uint32_t n_layer = hparams.n_layer;
  10278. const uint32_t n_kv = llama_kv_cache_cell_max(kv_self);
  10279. const uint32_t n_used = kv_self.used;
  10280. assert(n_used <= n_kv);
  10281. //const int64_t t_start = ggml_time_us();
  10282. // number of cells moved
  10283. uint32_t n_moves = 0;
  10284. // each move requires 6*n_layer tensors (see build_defrag)
  10285. // - source view, destination view, copy operation
  10286. // - x2 for keys and values
  10287. //const uint32_t max_moves = LLAMA_MAX_NODES/(6*n_layer);
  10288. // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
  10289. const uint32_t max_moves = (LLAMA_MAX_NODES - 2*n_layer)/(6*n_layer);
  10290. // determine which KV cells to move where
  10291. //
  10292. // cell i moves to ids[i]
  10293. //
  10294. // if ids[i] == i || ids[i] == n_kv, then cell i is not moved
  10295. //
  10296. std::vector<uint32_t> ids(n_kv, n_kv);
  10297. for (uint32_t i0 = 0; i0 < n_used; ++i0) {
  10298. const auto & cell0 = kv_self.cells[i0];
  10299. if (!cell0.is_empty()) {
  10300. ids[i0] = i0;
  10301. continue;
  10302. }
  10303. // found a hole - fill it with data from the end of the cache
  10304. uint32_t nh = 1;
  10305. // determine the size of the hole
  10306. while (i0 + nh < n_used && kv_self.cells[i0 + nh].is_empty()) {
  10307. nh++;
  10308. }
  10309. uint32_t nf = 0;
  10310. uint32_t is = n_kv - 1;
  10311. // starting from the end, find nh non-empty cells
  10312. for (; is > i0; --is) {
  10313. const auto & cell1 = kv_self.cells[is];
  10314. if (cell1.is_empty() || ids[is] != n_kv) {
  10315. continue;
  10316. }
  10317. // non-empty cell which is not yet moved
  10318. nf++;
  10319. if (nf == nh) {
  10320. break;
  10321. }
  10322. }
  10323. // this can only happen if `n_used` is not accurate, which would be a bug
  10324. GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh");
  10325. nf = 0;
  10326. uint32_t i1 = is;
  10327. // are we moving a continuous block of memory?
  10328. bool cont = false;
  10329. // should we stop searching for the next move?
  10330. bool stop = false;
  10331. // go back and move the nf cells to the hole
  10332. for (; i1 < n_kv; ++i1) {
  10333. auto & cell1 = kv_self.cells[i1];
  10334. if (cell1.is_empty() || ids[i1] != n_kv) {
  10335. if (n_moves == max_moves) {
  10336. stop = true;
  10337. break;
  10338. }
  10339. cont = false;
  10340. continue;
  10341. }
  10342. // this cell goes to (i0 + nf)
  10343. ids[i1] = i0 + nf;
  10344. // move the cell meta data
  10345. kv_self.cells[i0 + nf] = cell1;
  10346. // clear the old cell and move the head there
  10347. cell1 = llama_kv_cell();
  10348. kv_self.head = n_used;
  10349. if (!cont) {
  10350. n_moves++;
  10351. cont = true;
  10352. }
  10353. nf++;
  10354. if (nf == nh) {
  10355. break;
  10356. }
  10357. }
  10358. if (stop || n_moves == max_moves) {
  10359. break;
  10360. }
  10361. //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh);
  10362. i0 += nh - 1;
  10363. }
  10364. if (n_moves == 0) {
  10365. return;
  10366. }
  10367. //LLAMA_LOG_INFO("(tmp log) KV defrag cell moves: %u\n", n_moves);
  10368. //LLAMA_LOG_INFO("expected gf nodes: %u\n", 6*n_moves*n_layer);
  10369. #if 0
  10370. // CPU defrag
  10371. //
  10372. // TODO: optimizations are possible:
  10373. // - multiple threads
  10374. // - avoid copying to the host memory when already there
  10375. //
  10376. // likely not worth the effort, as we have ggml_graph based defrag
  10377. //
  10378. const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
  10379. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
  10380. const uint32_t kv_size = kv_self.size;
  10381. std::vector<uint8_t> buf_k;
  10382. std::vector<uint8_t> buf_v;
  10383. for (uint32_t il = 0; il < n_layer; ++il) {
  10384. const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
  10385. const size_t k_size = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*kv_size);
  10386. const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
  10387. const size_t v_size = ggml_row_size (kv_self.v_l[il]->type, n_embd_v_gqa*kv_size);
  10388. buf_k.resize(k_size);
  10389. buf_v.resize(v_size);
  10390. ggml_backend_tensor_get(kv_self.k_l[il], buf_k.data(), 0, buf_k.size());
  10391. ggml_backend_tensor_get(kv_self.v_l[il], buf_v.data(), 0, buf_v.size());
  10392. // batch move [i, i+nm) to [id, id+nm)
  10393. // note: cells can move only to a lower index
  10394. for (uint32_t i = 0; i < n_kv; ++i) {
  10395. const uint32_t id = ids[i];
  10396. if (i == id || id == n_kv) {
  10397. continue;
  10398. }
  10399. uint32_t nm = 1;
  10400. while (i + nm < n_kv && ids[i + nm] == id + nm) {
  10401. nm++;
  10402. }
  10403. // move keys
  10404. {
  10405. const int64_t os = i*k_size_row;
  10406. const int64_t od = id*k_size_row;
  10407. memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row);
  10408. }
  10409. // move values (note: they are transposed)
  10410. {
  10411. const int64_t os = i;
  10412. const int64_t od = id;
  10413. for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
  10414. memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el);
  10415. }
  10416. }
  10417. i += nm - 1;
  10418. }
  10419. ggml_backend_tensor_set(kv_self.k_l[il], buf_k.data(), 0, buf_k.size());
  10420. ggml_backend_tensor_set(kv_self.v_l[il], buf_v.data(), 0, buf_v.size());
  10421. }
  10422. #else
  10423. // ggml_graph defrag
  10424. ggml_backend_sched_reset(lctx.sched);
  10425. ggml_cgraph * gf = llama_build_graph_defrag(lctx, ids);
  10426. llama_graph_compute(lctx, gf, lctx.cparams.n_threads);
  10427. #endif
  10428. //const int64_t t_end = ggml_time_us();
  10429. //LLAMA_LOG_INFO("(tmp log) KV defrag time: %.3f ms\n", (t_end - t_start)/1000.0);
  10430. }
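// Worked example of the ids mapping, assuming n_kv = 6, n_used = 4, cells {0, 3, 4, 5} occupied
// and cells {1, 2} empty: the hole at 1-2 is filled from the back, giving
// ids = {0, 6, 6, 3, 1, 2}, i.e. cell 4 -> 1 and cell 5 -> 2 as one contiguous move
// (n_moves == 1), while cells 0 and 3 stay in place.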
  10431. static void llama_kv_cache_update_internal(struct llama_context & lctx) {
  10432. bool need_reserve = false;
  10433. // apply K-shift if needed
  10434. if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE && lctx.kv_self.has_shift) {
  10435. {
  10436. ggml_backend_sched_reset(lctx.sched);
  10437. ggml_cgraph * gf = llama_build_graph_k_shift(lctx);
  10438. ggml_backend_sched_alloc_graph(lctx.sched, gf);
  10439. llama_set_k_shift(lctx);
  10440. llama_graph_compute(lctx, gf, lctx.cparams.n_threads);
  10441. need_reserve = true;
  10442. }
  10443. {
  10444. auto & kv_self = lctx.kv_self;
  10445. kv_self.has_shift = false;
  10446. for (uint32_t i = 0; i < kv_self.size; ++i) {
  10447. kv_self.cells[i].delta = 0;
  10448. }
  10449. }
  10450. }
  10451. if (lctx.kv_self.recurrent && lctx.kv_self.do_copy) {
  10452. {
  10453. ggml_backend_sched_reset(lctx.sched);
  10454. ggml_cgraph * gf = llama_build_graph_s_copy(lctx);
  10455. ggml_backend_sched_alloc_graph(lctx.sched, gf);
  10456. llama_set_s_copy(lctx);
  10457. llama_graph_compute(lctx, gf, lctx.cparams.n_threads);
  10458. need_reserve = true;
  10459. }
  10460. {
  10461. auto & kv_self = lctx.kv_self;
  10462. kv_self.do_copy = false;
  10463. for (uint32_t i = 0; i < kv_self.size; ++i) {
  10464. kv_self.cells[i].src = i;
  10465. }
  10466. }
  10467. }
  10468. // defragment the KV cache if needed
  10469. if (lctx.kv_self.do_defrag) {
  10470. llama_kv_cache_defrag_internal(lctx);
  10471. need_reserve = true;
  10472. lctx.kv_self.do_defrag = false;
  10473. }
  10474. // reserve a worst case graph again
  10475. if (need_reserve) {
  10476. // TODO: extract to a function
  10477. // build worst-case graph
  10478. int n_tokens = (int)std::min(lctx.cparams.n_ctx, lctx.cparams.n_ubatch);
  10479. int n_past = lctx.cparams.n_ctx - n_tokens;
  10480. llama_token token = llama_token_bos(&lctx.model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
  10481. ggml_cgraph * gf = llama_build_graph(lctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);
  10482. // initialize scheduler with the worst-case graph
  10483. ggml_backend_sched_reset(lctx.sched);
  10484. if (!ggml_backend_sched_reserve(lctx.sched, gf)) {
  10485. LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
  10486. }
  10487. }
  10488. }
  10489. //
  10490. // tokenizer
  10491. //
  10492. static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
  10493. return vocab.type;
  10494. }
  10495. static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
  10496. GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
  10497. return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL;
  10498. }
  10499. static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
  10500. GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
  10501. return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNKNOWN;
  10502. }
  10503. static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
  10504. GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
  10505. return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_CONTROL;
  10506. }
  10507. static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
  10508. GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
  10509. return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_BYTE;
  10510. }
  10511. static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id) {
  10512. GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
  10513. return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_USER_DEFINED;
  10514. }
  10515. static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
  10516. GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
  10517. GGML_ASSERT(llama_is_byte_token(vocab, id));
  10518. const auto & token_data = vocab.id_to_token.at(id);
  10519. switch (llama_vocab_get_type(vocab)) {
  10520. case LLAMA_VOCAB_TYPE_SPM: {
  10521. auto buf = token_data.text.substr(3, 2);
  10522. return strtol(buf.c_str(), NULL, 16);
  10523. }
  10524. case LLAMA_VOCAB_TYPE_BPE: {
  10525. GGML_ASSERT(false);
  10526. return unicode_utf8_to_byte(token_data.text); // TODO: why is this here after GGML_ASSERT?
  10527. }
  10528. case LLAMA_VOCAB_TYPE_WPM: {
  10529. GGML_ASSERT(false);
  10530. }
  10531. default:
  10532. GGML_ASSERT(false);
  10533. }
  10534. }
  10535. static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
  10536. GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
  10537. static const char * hex = "0123456789ABCDEF";
  10538. switch (llama_vocab_get_type(vocab)) {
  10539. case LLAMA_VOCAB_TYPE_SPM: {
  10540. const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
  10541. auto token = vocab.token_to_id.find(buf);
  10542. if (token != vocab.token_to_id.end()) {
  10543. return (*token).second;
  10544. }
  10545. // Try to fall back to just the byte as a string
  10546. const char buf2[2] = { (char)ch, 0 };
  10547. return vocab.token_to_id.at(buf2);
  10548. }
  10549. case LLAMA_VOCAB_TYPE_WPM:
  10550. case LLAMA_VOCAB_TYPE_BPE: {
  10551. return vocab.token_to_id.at(unicode_byte_to_utf8(ch));
  10552. }
  10553. default:
  10554. GGML_ASSERT(false);
  10555. }
  10556. }
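// Example: for an SPM vocab, llama_byte_to_token(vocab, 0x41) first looks up the token "<0x41>"
// and, if absent, falls back to the literal single-character string "A"; for BPE/WPM vocabs the
// byte is mapped through unicode_byte_to_utf8() before the lookup.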
  10557. static void llama_escape_whitespace(std::string & text) {
  10558. replace_all(text, " ", "\xe2\x96\x81");
  10559. }
  10560. static void llama_unescape_whitespace(std::string & word) {
  10561. replace_all(word, "\xe2\x96\x81", " ");
  10562. }
  10563. struct llm_symbol {
  10564. using index = int;
  10565. index prev;
  10566. index next;
  10567. const char * text;
  10568. size_t n;
  10569. };
  10570. static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
  10571. // SPM tokenizer
  10572. // original implementation:
  10573. // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
  10574. struct llm_bigram_spm {
  10575. struct comparator {
  10576. bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
  10577. return (l.score < r.score) || (l.score == r.score && l.left > r.left);
  10578. }
  10579. };
  10580. using queue_storage = std::vector<llm_bigram_spm>;
  10581. using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
  10582. llm_symbol::index left;
  10583. llm_symbol::index right;
  10584. float score;
  10585. size_t size;
  10586. };
  10587. struct llm_tokenizer_spm {
  10588. llm_tokenizer_spm(const llama_vocab & vocab) : vocab(vocab) {}
  10589. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  10590. // split string into utf8 chars
  10591. int index = 0;
  10592. size_t offs = 0;
  10593. while (offs < text.size()) {
  10594. llm_symbol sym;
  10595. size_t len = utf8_len(text[offs]);
  10596. sym.text = text.c_str() + offs;
  10597. sym.n = std::min(len, text.size() - offs);
  10598. offs += sym.n;
  10599. sym.prev = index - 1;
  10600. sym.next = offs == text.size() ? -1 : index + 1;
  10601. index++;
  10602. symbols.emplace_back(sym);
  10603. }
  10604. // seed the work queue with all possible 2-character tokens.
  10605. for (size_t i = 1; i < symbols.size(); ++i) {
  10606. try_add_bigram(i - 1, i);
  10607. }
  10608. // keep substituting the highest frequency pairs for as long as we can.
  10609. while (!work_queue.empty()) {
  10610. auto bigram = work_queue.top();
  10611. work_queue.pop();
  10612. auto & left_sym = symbols[bigram.left];
  10613. auto & right_sym = symbols[bigram.right];
  10614. // if one of the symbols already got merged, skip it.
  10615. if (left_sym.n == 0 || right_sym.n == 0 ||
  10616. left_sym.n + right_sym.n != bigram.size) {
  10617. continue;
  10618. }
  10619. // merge the right sym into the left one
  10620. left_sym.n += right_sym.n;
  10621. right_sym.n = 0;
  10622. //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
  10623. // remove the right sym from the chain
  10624. left_sym.next = right_sym.next;
  10625. if (right_sym.next >= 0) {
  10626. symbols[right_sym.next].prev = bigram.left;
  10627. }
  10628. // find more substitutions
  10629. try_add_bigram(left_sym.prev, bigram.left);
  10630. try_add_bigram(bigram.left, left_sym.next);
  10631. }
  10632. for (int i = 0; i != -1; i = symbols[i].next) {
  10633. auto & symbol = symbols[i];
  10634. resegment(symbol, output);
  10635. }
  10636. }
  10637. private:
  10638. void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
  10639. auto text = std::string(symbol.text, symbol.n);
  10640. auto token = vocab.token_to_id.find(text);
  10641. // Do we need to support is_unused?
  10642. if (token != vocab.token_to_id.end()) {
  10643. output.push_back((*token).second);
  10644. return;
  10645. }
  10646. const auto p = rev_merge.find(text);
  10647. if (p == rev_merge.end()) {
  10648. // output any symbols that did not form tokens as bytes.
  10649. output.reserve(output.size() + symbol.n);
  10650. for (int j = 0; j < (int)symbol.n; ++j) {
  10651. llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
  10652. output.push_back(token_id);
  10653. }
  10654. return;
  10655. }
  10656. resegment(symbols[p->second.first], output);
  10657. resegment(symbols[p->second.second], output);
  10658. }
  10659. void try_add_bigram(int left, int right) {
  10660. if (left == -1 || right == -1) {
  10661. return;
  10662. }
  10663. const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
  10664. auto token = vocab.token_to_id.find(text);
  10665. if (token == vocab.token_to_id.end()) {
  10666. return;
  10667. }
  10668. if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
  10669. return;
  10670. }
  10671. const auto & tok_data = vocab.id_to_token[(*token).second];
  10672. llm_bigram_spm bigram;
  10673. bigram.left = left;
  10674. bigram.right = right;
  10675. bigram.score = tok_data.score;
  10676. bigram.size = text.size();
  10677. work_queue.push(bigram);
  10678. // Do we need to support is_unused?
  10679. rev_merge[text] = std::make_pair(left, right);
  10680. }
  10681. const llama_vocab & vocab;
  10682. std::vector<llm_symbol> symbols;
  10683. llm_bigram_spm::queue work_queue;
  10684. std::map<std::string, std::pair<int, int>> rev_merge;
  10685. };
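// Illustrative walk-through (hypothetical vocab, not part of the sources above): the SPM
// tokenizer repeatedly merges the adjacent pair whose concatenation is a vocab token with
// the highest score. Assuming "h", "e", "l", "o", "he", "ll", "hell" and "hello" are all
// tokens and the longer merges score higher:
//
//   std::vector<llama_vocab::id> ids;
//   llm_tokenizer_spm spm(vocab);
//   spm.tokenize("hello", ids);
//
//   h e l l o  ->  he l l o  ->  he ll o  ->  hell o  ->  hello
//
// A surviving symbol that is itself a token is emitted directly by resegment(); otherwise it
// is split back through rev_merge, and as a last resort emitted as byte tokens via
// llama_byte_to_token().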
  10686. // BPE tokenizer
  10687. // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
  10688. // tried to simplify unicode stuff, so most likely does not work 100% correctly!
  10689. // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
  10690. struct llm_bigram_bpe {
  10691. struct comparator {
  10692. bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
  10693. return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
  10694. }
  10695. };
  10696. using queue_storage = std::vector<llm_bigram_bpe>;
  10697. using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
  10698. llm_symbol::index left;
  10699. llm_symbol::index right;
  10700. std::string text;
  10701. int rank;
  10702. size_t size;
  10703. };
  10704. struct llm_tokenizer_bpe {
  10705. llm_tokenizer_bpe(const llama_vocab & vocab): vocab(vocab) {}
  10706. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  10707. int final_prev_index = -1;
  10708. bool ignore_merges = false;
  10709. std::vector<std::string> word_collection;
  10710. switch (vocab.type) {
  10711. case LLAMA_VOCAB_TYPE_BPE:
  10712. switch (vocab.type_pre) {
  10713. case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
  10714. ignore_merges = true;
  10715. word_collection = unicode_regex_split(text, {
  10716. // original regex from tokenizer.json
  10717. //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
  10718. // adapted: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2080233989
  10719. "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
  10720. });
  10721. break;
  10722. case LLAMA_VOCAB_PRE_TYPE_DBRX:
  10723. case LLAMA_VOCAB_PRE_TYPE_SMAUG:
  10724. word_collection = unicode_regex_split(text, {
  10725. // same as llama3
  10726. "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
  10727. });
  10728. break;
  10729. case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM:
  10730. word_collection = unicode_regex_split(text, {
  10731. "[\r\n]",
  10732. "\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z𐐀-𐑏𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+",
  10733. "\\s?[!-/:-~!-/:-~‘-‟ -。]+",
  10734. "\\s+$",
  10735. "[一-龥ࠀ-一가-퟿]+",
  10736. "\\p{N}+",
  10737. });
  10738. break;
  10739. case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER:
  10740. word_collection = unicode_regex_split(text, {
  10741. "[\r\n]",
  10742. "\\s?\\p{L}+",
  10743. "\\s?\\p{P}+",
  10744. "[一-龥ࠀ-一가-퟿]+",
  10745. "\\p{N}",
  10746. });
  10747. break;
  10748. case LLAMA_VOCAB_PRE_TYPE_FALCON:
  10749. word_collection = unicode_regex_split(text, {
  10750. "[\\p{P}\\$\\+<=>\\^~\\|]+",
  10751. "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
  10752. "[0-9][0-9][0-9]",
  10753. });
  10754. break;
  10755. case LLAMA_VOCAB_PRE_TYPE_MPT:
  10756. // TODO: MPT pre-tokenization regexes are unknown
  10757. // the following are close, but not exact. run the following:
  10758. // ./bin/test-tokenizer-0 ../models/ggml-vocab-mpt.gguf
  10759. GGML_ASSERT("MPT pre-tokenization regexes are unknown - fixes needed");
// NOTE: a string literal is always truthy, so the assert above never fires
  10760. word_collection = unicode_regex_split(text, {
  10761. "\\s?\\p{L}+",
  10762. "\\s?\\p{P}+",
  10763. "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
  10764. });
  10765. break;
  10766. case LLAMA_VOCAB_PRE_TYPE_STARCODER:
  10767. case LLAMA_VOCAB_PRE_TYPE_REFACT:
  10768. case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
  10769. word_collection = unicode_regex_split(text, {
  10770. "\\p{N}",
  10771. "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
  10772. });
  10773. break;
  10774. case LLAMA_VOCAB_PRE_TYPE_GPT2:
  10775. case LLAMA_VOCAB_PRE_TYPE_OLMO:
  10776. word_collection = unicode_regex_split(text, {
  10777. "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
  10778. });
  10779. break;
  10780. case LLAMA_VOCAB_PRE_TYPE_STABLELM2:
  10781. case LLAMA_VOCAB_PRE_TYPE_QWEN2:
  10782. word_collection = unicode_regex_split(text, {
  10783. // original regex from tokenizer.json
  10784. // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
  10785. "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
  10786. });
  10787. break;
  10788. default:
  10789. // default regex for BPE tokenization pre-processing
  10790. word_collection = unicode_regex_split(text, {
  10791. "[\\p{P}\\$\\+<=>\\^~\\|]+",
  10792. "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
  10793. "\\p{N}+",
  10794. "[0-9][0-9][0-9]",
  10795. });
  10796. break;
  10797. }
  10798. break;
  10799. default:
  10800. GGML_ASSERT(false);
  10801. break;
  10802. }
  10803. symbols_final.clear();
  10804. for (auto & word : word_collection) {
  10805. work_queue = llm_bigram_bpe::queue();
  10806. symbols.clear();
  10807. int index = 0;
  10808. size_t offset = 0;
  10809. if (ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) {
  10810. symbols.emplace_back(llm_symbol{-1, -1, word.c_str(), word.size()});
  10811. offset = word.size();
  10812. }
  10813. while (offset < word.size()) {
  10814. llm_symbol sym;
  10815. size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
  10816. sym.text = word.c_str() + offset;
  10817. sym.n = char_len;
  10818. offset += sym.n;
  10819. sym.prev = index - 1;
  10820. sym.next = offset == word.size() ? -1 : index + 1;
  10821. index++;
  10822. symbols.emplace_back(sym);
  10823. }
  10824. for (size_t i = 1; i < symbols.size(); ++i) {
  10825. add_new_bigram(i - 1, i);
  10826. }
  10827. // build token(s)
  10828. while (!work_queue.empty()) {
  10829. auto bigram = work_queue.top();
  10830. work_queue.pop();
  10831. auto & left_symbol = symbols[bigram.left];
  10832. auto & right_symbol = symbols[bigram.right];
  10833. if (left_symbol.n == 0 || right_symbol.n == 0) {
  10834. continue;
  10835. }
  10836. std::string left_token = std::string(left_symbol.text, left_symbol.n);
  10837. std::string right_token = std::string(right_symbol.text, right_symbol.n);
  10838. if (left_token + right_token != bigram.text) {
  10839. continue; // Skip this bigram if it's outdated
  10840. }
  10841. // merge the right sym into the left one
  10842. left_symbol.n += right_symbol.n;
  10843. right_symbol.n = 0;
  10844. // remove the right sym from the chain
  10845. left_symbol.next = right_symbol.next;
  10846. if (right_symbol.next >= 0) {
  10847. symbols[right_symbol.next].prev = bigram.left;
  10848. }
  10849. add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
  10850. add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
  10851. }
  10852. // add the finished tokens to the final list keeping correct order for next and prev
  10853. for (auto & sym : symbols) {
  10854. if (sym.n > 0) {
  10855. sym.prev = final_prev_index;
  10856. sym.next = -1;
  10857. if (final_prev_index != -1) {
  10858. symbols_final[final_prev_index].next = symbols_final.size();
  10859. }
  10860. symbols_final.emplace_back(sym);
  10861. final_prev_index = symbols_final.size() - 1;
  10862. }
  10863. }
  10864. }
  10865. symbols = symbols_final;
  10866. if (!symbols.empty()) {
  10867. for (int i = 0; i != -1; i = symbols[i].next) {
  10868. auto & symbol = symbols[i];
  10869. if (symbol.n == 0) {
  10870. continue;
  10871. }
  10872. const std::string str = std::string(symbol.text, symbol.n);
  10873. const auto token = vocab.token_to_id.find(str);
  10874. if (token == vocab.token_to_id.end()) {
  10875. for (auto j = str.begin(); j != str.end(); ++j) {
  10876. std::string byte_str(1, *j);
  10877. auto token_multibyte = vocab.token_to_id.find(byte_str);
  10878. if (token_multibyte == vocab.token_to_id.end()) {
  10879. throw std::runtime_error("ERROR: byte not found in vocab");
  10880. }
  10881. output.push_back((*token_multibyte).second);
  10882. }
  10883. } else {
  10884. output.push_back((*token).second);
  10885. }
  10886. }
  10887. }
  10888. }
  10889. private:
  10890. void add_new_bigram(int left, int right) {
  10891. if (left == -1 || right == -1) {
  10892. return;
  10893. }
  10894. std::string left_token = std::string(symbols[left].text, symbols[left].n);
  10895. std::string right_token = std::string(symbols[right].text, symbols[right].n);
  10896. int rank_found = -1;
  10897. rank_found = vocab.find_bpe_rank(left_token, right_token);
  10898. if (rank_found < 0) {
  10899. return;
  10900. }
  10901. llm_bigram_bpe bigram;
  10902. bigram.left = left;
  10903. bigram.right = right;
  10904. bigram.text = left_token + right_token;
  10905. bigram.size = left_token.size() + right_token.size();
  10906. bigram.rank = rank_found;
  10907. work_queue.push(bigram);
  10908. }
  10909. const llama_vocab & vocab;
  10910. std::vector<llm_symbol> symbols;
  10911. std::vector<llm_symbol> symbols_final;
  10912. llm_bigram_bpe::queue work_queue;
  10913. };
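// Illustrative sketch (hypothetical merges, not part of the sources above): unlike SPM, the
// BPE tokenizer merges by rank, and the comparator above turns the priority_queue into a
// min-heap on rank, so the pair that appears earliest in the merges list wins. E.g. for the
// word "lower" with find_bpe_rank returning ("l","o") -> 0, ("e","r") -> 1, ("lo","w") -> 2:
//
//   l o w e r  ->  lo w e r  ->  lo w er  ->  low er
//
// Stale queue entries are detected by re-checking bigram.text against the current left/right
// symbol contents and skipped; leftover bytes that are not in the vocab throw
// "byte not found in vocab".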
  10914. struct llm_tokenizer_wpm {
  10915. llm_tokenizer_wpm(const llama_vocab & vocab): vocab(vocab) {}
  10916. void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
  10917. const auto & token_map = vocab.token_to_id;
  10918. // normalize and split by whitespace
  10919. std::vector<std::string> words = preprocess(text);
10920. // the CLS token is prepended by the caller (when add_special), see llama_tokenize_internal
  10921. // find the longest tokens that form the words
  10922. for (const std::string &word : words) {
  10923. // skip empty words
  10924. if (word.size() == 0) {
  10925. continue;
  10926. }
  10927. // prepend phantom space
  10928. const std::string word1 = "\xe2\x96\x81" + word;
  10929. const int n = word1.size();
  10930. const size_t current_tokens = output.size();
  10931. // we're at the start of a new word
  10932. // move through character position in word
  10933. for (int i = 0; i < n; ++i) {
  10934. // loop through possible match length
  10935. bool match = false;
  10936. for (int j = n; j > i; j--) {
  10937. auto it = token_map.find(word1.substr(i, j - i));
  10938. if (it != token_map.end()) {
  10939. output.push_back(it->second);
  10940. match = true;
  10941. i = j - 1;
  10942. break;
  10943. }
  10944. }
  10945. if (!match) { // discard all
  10946. output.resize(current_tokens);
  10947. break; // and discard next tokens
  10948. }
  10949. }
  10950. // we didn't find any matches for this word
  10951. if (current_tokens == output.size()) {
  10952. output.push_back(vocab.special_unk_id);
  10953. }
  10954. }
  10955. }
  10956. std::vector<std::string> preprocess(const std::string & text) {
  10957. const std::vector<uint32_t> cpts_nfd = unicode_cpts_normalize_nfd(unicode_cpts_from_utf8(text));
  10958. std::vector<std::string> words(1, "");
  10959. for (const char32_t cpt : cpts_nfd) {
  10960. const auto flags = unicode_cpt_flags(cpt);
  10961. if (flags.is_whitespace) {
  10962. if (words.back().size()) { // finish previous word if any
  10963. words.emplace_back();
  10964. }
  10965. continue;
  10966. }
  10967. assert (!flags.is_separator);
  10968. if (cpt == 0 || cpt == 0xFFFD || flags.is_control) {
  10969. continue;
  10970. }
  10971. const std::string s = unicode_cpt_to_utf8(unicode_tolower(cpt));
  10972. if (flags.is_punctuation || ( cpt < 0x7F && flags.is_symbol ) || is_chinese_char(cpt)) {
  10973. if (words.back().size()) { // finish previous word if any
  10974. words.emplace_back();
  10975. }
  10976. words.back() = s; // single char word
  10977. words.emplace_back(); // start a new word
  10978. } else {
  10979. words.back() += s; // append char to word
  10980. }
  10981. }
  10982. if (!words.back().size()) {
  10983. words.pop_back();
  10984. }
  10985. return words;
  10986. }
  10987. static bool is_chinese_char(uint32_t cpt) {
  10988. return
  10989. (cpt >= 0x04E00 && cpt <= 0x09FFF) ||
  10990. (cpt >= 0x03400 && cpt <= 0x04DBF) ||
  10991. (cpt >= 0x20000 && cpt <= 0x2A6DF) ||
  10992. (cpt >= 0x2A700 && cpt <= 0x2B73F) ||
  10993. (cpt >= 0x2B740 && cpt <= 0x2B81F) ||
  10994. (cpt >= 0x2B920 && cpt <= 0x2CEAF) || // this should be 0x2B820 but in hf rust code it is 0x2B920
  10995. (cpt >= 0x0F900 && cpt <= 0x0FAFF) ||
  10996. (cpt >= 0x2F800 && cpt <= 0x2FA1F);
  10997. //(cpt >= 0x3000 && cpt <= 0x303F) ||
  10998. //(cpt >= 0xFF00 && cpt <= 0xFFEF);
  10999. }
  11000. const llama_vocab & vocab;
  11001. };
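// Illustrative sketch (hypothetical vocab): WPM tokenization is greedy longest-match-first
// over each word, after normalization (NFD, lowercasing), splitting on whitespace,
// punctuation and CJK characters, and prepending the phantom space "\xe2\x96\x81" (U+2581):
//
//   "unbelievable" -> "▁unbelievable"              // single longest match
//   "unbelieving"  -> "▁un" + "believ" + "ing"     // falls back to shorter pieces
//
// If at some position no prefix of the remaining characters matches a token, the pieces
// collected for that word are discarded and special_unk_id is emitted instead.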
  11002. typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
  11003. FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
  11004. FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
  11005. } FRAGMENT_BUFFER_VARIANT_TYPE;
  11006. struct fragment_buffer_variant {
  11007. fragment_buffer_variant(llama_vocab::id _token)
  11008. :
  11009. type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
  11010. token(_token),
  11011. raw_text(_dummy),
  11012. offset(0),
  11013. length(0) {}
  11014. fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
  11015. :
  11016. type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
  11017. token((llama_vocab::id) - 1),
  11018. raw_text(_raw_text),
  11019. offset(_offset),
  11020. length(_length){
  11021. GGML_ASSERT(_offset >= 0);
  11022. GGML_ASSERT(_length >= 1);
  11023. GGML_ASSERT(offset + length <= raw_text.length());
  11024. }
  11025. const FRAGMENT_BUFFER_VARIANT_TYPE type;
  11026. const llama_vocab::id token;
  11027. const std::string _dummy;
  11028. const std::string & raw_text;
  11029. const uint64_t offset;
  11030. const uint64_t length;
  11031. };
  11032. // #define PRETOKENIZERDEBUG
  11033. static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
  11034. // for each special token
  11035. for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
  11036. const auto & data = vocab.id_to_token[special_id];
  11037. const auto & special_token = data.text;
  11038. // for each text fragment
  11039. std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
  11040. while (it != buffer.end()) {
  11041. auto & fragment = (*it);
  11042. // if a fragment is text ( not yet processed )
  11043. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  11044. auto & raw_text = fragment.raw_text;
  11045. auto raw_text_base_offset = fragment.offset;
  11046. auto raw_text_base_length = fragment.length;
  11047. // loop over the text
  11048. while (true) {
  11049. // find the first occurrence of a given special token in this fragment
11050. // passing the offset argument only limits the "search area", but match coordinates
11051. // are still relative to the full source raw_text
  11052. auto match = raw_text.find(special_token, raw_text_base_offset);
  11053. // no occurrences found, stop processing this fragment for a given special token
  11054. if (match == std::string::npos) break;
  11055. // check if match is within bounds of offset <-> length
  11056. if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
  11057. #ifdef PRETOKENIZERDEBUG
  11058. LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  11059. #endif
  11060. auto source = std::distance(buffer.begin(), it);
  11061. // if match is further than base offset
  11062. // then we have some text to the left of it
  11063. if (match > raw_text_base_offset) {
  11064. // left
  11065. const int64_t left_reminder_offset = raw_text_base_offset + 0;
  11066. int64_t left_reminder_length = match - raw_text_base_offset;
  11067. if (data.attr & LLAMA_TOKEN_ATTR_LSTRIP) {
  11068. while (left_reminder_length > 0 && isspace(raw_text[left_reminder_offset + left_reminder_length - 1])) {
  11069. left_reminder_length--;
  11070. }
  11071. }
  11072. if (left_reminder_length > 0) {
  11073. buffer.emplace_after(it, raw_text, left_reminder_offset, left_reminder_length);
  11074. it++;
  11075. }
  11076. #ifdef PRETOKENIZERDEBUG
  11077. LLAMA_LOG_WARN("FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
  11078. #endif
  11079. }
  11080. // special token
  11081. buffer.emplace_after(it, special_id);
  11082. it++;
  11083. // right
  11084. if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
  11085. int64_t right_reminder_offset = match + special_token.length();
  11086. int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
  11087. if (data.attr & LLAMA_TOKEN_ATTR_RSTRIP) {
  11088. while (right_reminder_length > 0 && isspace(raw_text[right_reminder_offset])) {
  11089. right_reminder_offset++;
  11090. right_reminder_length--;
  11091. }
  11092. }
  11093. if (right_reminder_length > 0) {
  11094. buffer.emplace_after(it, raw_text, right_reminder_offset, right_reminder_length);
  11095. it++;
  11096. }
  11097. #ifdef PRETOKENIZERDEBUG
  11098. LLAMA_LOG_WARN("FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
  11099. #endif
  11100. if (source == 0) {
  11101. buffer.erase_after(buffer.before_begin());
  11102. } else {
  11103. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  11104. }
  11105. // repeat for the right side
  11106. raw_text_base_offset = right_reminder_offset;
  11107. raw_text_base_length = right_reminder_length;
  11108. #ifdef PRETOKENIZERDEBUG
  11109. LLAMA_LOG_WARN("RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
  11110. #endif
  11111. } else {
  11112. if (source == 0) {
  11113. buffer.erase_after(buffer.before_begin());
  11114. } else {
  11115. buffer.erase_after(std::next(buffer.begin(), (source-1)));
  11116. }
  11117. break;
  11118. }
  11119. }
  11120. }
  11121. it++;
  11122. }
  11123. }
  11124. }
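// Illustrative sketch (hypothetical special token): given the special token "<|eot|>" and one
// raw-text fragment "hello <|eot|> world", the partitioning above rewrites the buffer from
//   [ RAW_TEXT("hello <|eot|> world") ]
// into
//   [ RAW_TEXT("hello "), TOKEN(<|eot|> id), RAW_TEXT(" world") ]
// so that only the remaining RAW_TEXT pieces reach the vocab-specific tokenizers.
// LSTRIP/RSTRIP token attributes additionally strip whitespace adjacent to the match.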
  11125. static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool add_special, bool parse_special) {
  11126. std::vector<llama_vocab::id> output;
  11127. std::forward_list<fragment_buffer_variant> fragment_buffer;
  11128. if (!raw_text.empty()) {
  11129. fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
  11130. if (parse_special) tokenizer_st_partition(vocab, fragment_buffer);
  11131. }
  11132. switch (vocab.type) {
  11133. case LLAMA_VOCAB_TYPE_SPM:
  11134. {
  11135. // OG tokenizer behavior:
  11136. //
  11137. // tokenizer.encode('', add_special_tokens=True) returns [1]
  11138. // tokenizer.encode('', add_special_tokens=False) returns []
  11139. bool is_prev_special = false;
  11140. if (add_special && vocab.special_add_bos != 0) {
  11141. GGML_ASSERT(vocab.special_bos_id != -1);
  11142. output.push_back(vocab.special_bos_id);
  11143. is_prev_special = true;
  11144. }
  11145. for (const auto & fragment : fragment_buffer) {
  11146. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  11147. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  11148. if (vocab.add_space_prefix) {
  11149. if (!output.size() || is_prev_special) { // prefix with space if first token
  11150. raw_text = " " + raw_text;
  11151. }
  11152. }
  11153. #ifdef PRETOKENIZERDEBUG
  11154. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  11155. #endif
  11156. llm_tokenizer_spm tokenizer(vocab);
  11157. llama_escape_whitespace(raw_text);
  11158. tokenizer.tokenize(raw_text, output);
  11159. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  11160. output.push_back(fragment.token);
  11161. is_prev_special = true;
  11162. }
  11163. }
  11164. if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) {
  11165. LLAMA_LOG_WARN(
  11166. "%s: Added a BOS token to the prompt as specified by the model but the prompt "
  11167. "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
  11168. "Are you sure this is what you want?\n", __FUNCTION__);
  11169. }
  11170. if (add_special && vocab.special_add_eos == 1) {
  11171. GGML_ASSERT(vocab.special_eos_id != -1);
  11172. output.push_back(vocab.special_eos_id);
  11173. }
  11174. } break;
  11175. case LLAMA_VOCAB_TYPE_BPE:
  11176. {
  11177. if (add_special && vocab.special_add_bos != 0) {
  11178. GGML_ASSERT(vocab.special_bos_id != -1);
  11179. output.push_back(vocab.special_bos_id);
  11180. }
  11181. for (const auto & fragment : fragment_buffer) {
  11182. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  11183. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  11184. #ifdef PRETOKENIZERDEBUG
  11185. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  11186. #endif
  11187. llm_tokenizer_bpe tokenizer(vocab);
  11188. tokenizer.tokenize(raw_text, output);
  11189. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  11190. output.push_back(fragment.token);
  11191. }
  11192. }
  11193. if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) {
  11194. LLAMA_LOG_WARN(
  11195. "%s: Added a BOS token to the prompt as specified by the model but the prompt "
  11196. "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
  11197. "Are you sure this is what you want?\n", __FUNCTION__);
  11198. }
  11199. if (add_special && vocab.special_add_eos == 1) {
11200. GGML_ASSERT(vocab.special_eos_id != -1);
  11201. output.push_back(vocab.special_eos_id);
  11202. }
  11203. } break;
  11204. case LLAMA_VOCAB_TYPE_WPM:
  11205. {
  11206. if (add_special) {
  11207. GGML_ASSERT(vocab.special_cls_id != -1);
  11208. output.push_back(vocab.special_cls_id);
  11209. }
  11210. for (const auto & fragment : fragment_buffer) {
  11211. if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
  11212. auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
  11213. #ifdef PRETOKENIZERDEBUG
  11214. LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
  11215. #endif
  11216. llm_tokenizer_wpm tokenizer(vocab);
  11217. tokenizer.tokenize(raw_text, output);
  11218. } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
  11219. output.push_back(fragment.token);
  11220. }
  11221. }
  11222. if (add_special) {
  11223. GGML_ASSERT(vocab.special_sep_id != -1);
  11224. output.push_back(vocab.special_sep_id);
  11225. }
  11226. } break;
  11227. case LLAMA_VOCAB_TYPE_NONE:
  11228. GGML_ASSERT(false);
  11229. }
  11230. return output;
  11231. }
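// Example call (illustrative; `vocab` is a fully loaded llama_vocab):
//
//   // add_special = true   -> BOS/EOS (or CLS/SEP for WPM) are added per the vocab flags
//   // parse_special = true -> special tokens inside the text are matched literally
//   std::vector<llama_vocab::id> ids =
//       llama_tokenize_internal(vocab, "Hello world", /*add_special=*/true, /*parse_special=*/false);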
  11232. //
  11233. // grammar - internal
  11234. //
11235. // Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 for use as a
11236. // pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
  11237. std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
  11238. const std::string & src,
  11239. llama_partial_utf8 partial_start) {
  11240. static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
  11241. const char * pos = src.c_str();
  11242. std::vector<uint32_t> code_points;
11243. // common English strings have the same number of codepoints and bytes. `+ 1` for the terminating 0.
  11244. code_points.reserve(src.size() + 1);
  11245. uint32_t value = partial_start.value;
  11246. int n_remain = partial_start.n_remain;
  11247. // continue previous decode, if applicable
  11248. while (*pos != 0 && n_remain > 0) {
  11249. uint8_t next_byte = static_cast<uint8_t>(*pos);
  11250. if ((next_byte >> 6) != 2) {
  11251. // invalid sequence, abort
  11252. code_points.push_back(0);
  11253. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
  11254. }
  11255. value = (value << 6) + (next_byte & 0x3F);
  11256. ++pos;
  11257. --n_remain;
  11258. }
  11259. if (partial_start.n_remain > 0 && n_remain == 0) {
  11260. code_points.push_back(value);
  11261. }
  11262. // decode any subsequent utf-8 sequences, which may end in an incomplete one
  11263. while (*pos != 0) {
  11264. uint8_t first_byte = static_cast<uint8_t>(*pos);
  11265. uint8_t highbits = first_byte >> 4;
  11266. n_remain = lookup[highbits] - 1;
  11267. if (n_remain < 0) {
  11268. // invalid sequence, abort
  11269. code_points.clear();
  11270. code_points.push_back(0);
  11271. return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
  11272. }
  11273. uint8_t mask = (1 << (7 - n_remain)) - 1;
  11274. value = first_byte & mask;
  11275. ++pos;
  11276. while (*pos != 0 && n_remain > 0) {
  11277. value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
  11278. ++pos;
  11279. --n_remain;
  11280. }
  11281. if (n_remain == 0) {
  11282. code_points.push_back(value);
  11283. }
  11284. }
  11285. code_points.push_back(0);
  11286. return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
  11287. }
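// Worked example (illustrative): decoding "é" (0xC3 0xA9) split across two calls.
//   call 1: src = "\xC3", partial_start = { 0, 0 }
//           lookup[0xC] = 2 -> n_remain = 1, value = 0xC3 & 0x3F = 0x03
//           returns code_points = { 0 }, partial_utf8 = { 0x03, 1 }
//   call 2: src = "\xA9", partial_start = { 0x03, 1 }
//           value = (0x03 << 6) + (0xA9 & 0x3F) = 0xE9 -> U+00E9 is emitted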
  11288. // returns true iff pos points to the end of one of the definitions of a rule
  11289. static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
  11290. switch (pos->type) {
  11291. case LLAMA_GRETYPE_END: return true; // NOLINT
  11292. case LLAMA_GRETYPE_ALT: return true; // NOLINT
  11293. default: return false;
  11294. }
  11295. }
  11296. // returns true iff chr satisfies the char range at pos (regular or inverse range)
  11297. // asserts that pos is pointing to a char range element
  11298. static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
  11299. const llama_grammar_element * pos,
  11300. const uint32_t chr) {
  11301. bool found = false;
  11302. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR || pos->type == LLAMA_GRETYPE_CHAR_ANY;
  11303. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT
  11304. do {
  11305. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  11306. // inclusive range, e.g. [a-z]
  11307. found = found || (pos->value <= chr && chr <= pos[1].value);
  11308. pos += 2;
  11309. } else if (pos->type == LLAMA_GRETYPE_CHAR_ANY) {
  11310. // Any character matches "."
  11311. found = true;
  11312. pos += 1;
  11313. } else {
  11314. // exact char match, e.g. [a] or "a"
  11315. found = found || pos->value == chr;
  11316. pos += 1;
  11317. }
  11318. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  11319. return std::make_pair(found == is_positive_char, pos);
  11320. }
  11321. // returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
  11322. // range at pos (regular or inverse range)
  11323. // asserts that pos is pointing to a char range element
  11324. static bool llama_grammar_match_partial_char(
  11325. const llama_grammar_element * pos,
  11326. const llama_partial_utf8 partial_utf8) {
  11327. bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR || pos->type == LLAMA_GRETYPE_CHAR_ANY;
  11328. GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
  11329. uint32_t partial_value = partial_utf8.value;
  11330. int n_remain = partial_utf8.n_remain;
  11331. // invalid sequence or 7-bit char split across 2 bytes (overlong)
  11332. if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
  11333. return false;
  11334. }
  11335. // range of possible code points this partial UTF-8 sequence could complete to
  11336. uint32_t low = partial_value << (n_remain * 6);
  11337. uint32_t high = low | ((1 << (n_remain * 6)) - 1);
  11338. if (low == 0) {
  11339. if (n_remain == 2) {
  11340. low = 1 << 11;
  11341. } else if (n_remain == 3) {
  11342. low = 1 << 16;
  11343. }
  11344. }
  11345. do {
  11346. if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
  11347. // inclusive range, e.g. [a-z]
  11348. if (pos->value <= high && low <= pos[1].value) {
  11349. return is_positive_char;
  11350. }
  11351. pos += 2;
  11352. } else if (pos->type == LLAMA_GRETYPE_CHAR_ANY) {
  11353. // Any character matches "."
  11354. return true;
  11355. } else {
  11356. // exact char match, e.g. [a] or "a"
  11357. if (low <= pos->value && pos->value <= high) {
  11358. return is_positive_char;
  11359. }
  11360. pos += 1;
  11361. }
  11362. } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
  11363. return !is_positive_char;
  11364. }
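// Worked example (illustrative): with partial_utf8 = { value = 0x03, n_remain = 1 }
// (the dangling "é" prefix from the decode_utf8 example), the completable range is
//   low  = 0x03 << 6        = 0xC0
//   high = low | ((1<<6)-1) = 0xFF
// so a char range like [à-ÿ] (U+00E0..U+00FF) can still be satisfied by some continuation,
// while [a-z] (U+0061..U+007A) cannot.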
  11365. // transforms a grammar pushdown stack into N possible stacks, all ending
  11366. // at a character range (terminal element)
  11367. static void llama_grammar_advance_stack(
  11368. const std::vector<std::vector<llama_grammar_element>> & rules,
  11369. const std::vector<const llama_grammar_element *> & stack,
  11370. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  11371. if (stack.empty()) {
  11372. if (std::find(new_stacks.begin(), new_stacks.end(), stack) == new_stacks.end()) {
  11373. new_stacks.emplace_back(stack);
  11374. }
  11375. return;
  11376. }
  11377. const llama_grammar_element * pos = stack.back();
  11378. switch (pos->type) {
  11379. case LLAMA_GRETYPE_RULE_REF: {
  11380. const size_t rule_id = static_cast<size_t>(pos->value);
  11381. const llama_grammar_element * subpos = rules[rule_id].data();
  11382. do {
  11383. // init new stack without the top (pos)
  11384. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  11385. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  11386. // if this rule ref is followed by another element, add that to stack
  11387. new_stack.push_back(pos + 1);
  11388. }
  11389. if (!llama_grammar_is_end_of_sequence(subpos)) {
  11390. // if alternate is nonempty, add to stack
  11391. new_stack.push_back(subpos);
  11392. }
  11393. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  11394. while (!llama_grammar_is_end_of_sequence(subpos)) {
  11395. // scan to end of alternate def
  11396. subpos++;
  11397. }
  11398. if (subpos->type == LLAMA_GRETYPE_ALT) {
  11399. // there's another alternate def of this rule to process
  11400. subpos++;
  11401. } else {
  11402. break;
  11403. }
  11404. } while (true);
  11405. break;
  11406. }
  11407. case LLAMA_GRETYPE_CHAR:
  11408. case LLAMA_GRETYPE_CHAR_NOT:
  11409. case LLAMA_GRETYPE_CHAR_ANY:
  11410. if (std::find(new_stacks.begin(), new_stacks.end(), stack) == new_stacks.end()) {
  11411. // only add the stack if it's not a duplicate of one we already have
  11412. new_stacks.emplace_back(stack);
  11413. }
  11414. break;
  11415. default:
  11416. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  11417. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  11418. // those
  11419. GGML_ASSERT(false);
  11420. }
  11421. }
  11422. // takes a set of possible pushdown stacks on a grammar, which are required to
  11423. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  11424. // produces the N possible stacks if the given char is accepted at those
  11425. // positions
  11426. void llama_grammar_accept(
  11427. const std::vector<std::vector<llama_grammar_element>> & rules,
  11428. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  11429. const uint32_t chr,
  11430. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  11431. new_stacks.clear();
  11432. for (const auto & stack : stacks) {
  11433. if (stack.empty()) {
  11434. continue;
  11435. }
  11436. auto match = llama_grammar_match_char(stack.back(), chr);
  11437. if (match.first) {
  11438. const llama_grammar_element * pos = match.second;
  11439. // update top of stack to next element, if any
  11440. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  11441. if (!llama_grammar_is_end_of_sequence(pos)) {
  11442. new_stack.push_back(pos);
  11443. }
  11444. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  11445. }
  11446. }
  11447. }
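// Illustrative sketch: for the grammar  root ::= "a" "b" | "c"  the initial stacks point at
// the terminals 'a' and 'c'. Calling llama_grammar_accept() with chr = 'a' keeps only the
// first stack and advances it to the terminal 'b'; calling it with a character matched by
// neither alternate leaves new_stacks empty, i.e. the character is rejected by the grammar.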
  11448. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  11449. const std::vector<std::vector<llama_grammar_element>> & rules,
  11450. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  11451. const std::vector<llama_grammar_candidate> & candidates);
  11452. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  11453. const std::vector<std::vector<llama_grammar_element>> & rules,
  11454. const std::vector<const llama_grammar_element *> & stack,
  11455. const std::vector<llama_grammar_candidate> & candidates) {
  11456. std::vector<llama_grammar_candidate> rejects;
  11457. rejects.reserve(candidates.size());
  11458. if (stack.empty()) {
  11459. for (const auto & tok : candidates) {
  11460. if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
  11461. rejects.push_back(tok);
  11462. }
  11463. }
  11464. return rejects;
  11465. }
  11466. const llama_grammar_element * stack_pos = stack.back();
  11467. std::vector<llama_grammar_candidate> next_candidates;
  11468. next_candidates.reserve(candidates.size());
  11469. for (const auto & tok : candidates) {
  11470. if (*tok.code_points == 0) {
  11471. // reached end of full codepoints in token, reject iff it ended in a partial sequence
  11472. // that cannot satisfy this position in grammar
  11473. if (tok.partial_utf8.n_remain != 0 &&
  11474. !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
  11475. rejects.push_back(tok);
  11476. }
  11477. } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
  11478. next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
  11479. } else {
  11480. rejects.push_back(tok);
  11481. }
  11482. }
  11483. const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  11484. // update top of stack to next element, if any
  11485. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  11486. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  11487. stack_after.push_back(stack_pos_after);
  11488. }
  11489. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  11490. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  11491. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  11492. for (const auto & tok : next_rejects) {
  11493. rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
  11494. }
  11495. return rejects;
  11496. }
  11497. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  11498. const std::vector<std::vector<llama_grammar_element>> & rules,
  11499. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  11500. const std::vector<llama_grammar_candidate> & candidates) {
  11501. GGML_ASSERT(!stacks.empty()); // REVIEW
  11502. if (candidates.empty()) {
  11503. return std::vector<llama_grammar_candidate>();
  11504. }
  11505. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  11506. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  11507. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  11508. }
  11509. return rejects;
  11510. }
  11511. static bool llama_grammar_detect_left_recursion(
  11512. const std::vector<std::vector<llama_grammar_element>> & rules,
  11513. size_t rule_index,
  11514. std::vector<bool> * rules_visited,
  11515. std::vector<bool> * rules_in_progress,
  11516. std::vector<bool> * rules_may_be_empty) {
  11517. if ((*rules_in_progress)[rule_index]) {
  11518. return true;
  11519. }
  11520. (*rules_in_progress)[rule_index] = true;
  11521. const std::vector<llama_grammar_element> & rule = rules[rule_index];
  11522. // First check if the rule might produce the empty string. This could be done combined with the second
  11523. // step but it's more readable as two steps.
  11524. bool at_rule_start = true;
  11525. for (size_t i = 0; i < rule.size(); i++) {
  11526. if (llama_grammar_is_end_of_sequence(&rule[i])) {
  11527. if (at_rule_start) {
  11528. (*rules_may_be_empty)[rule_index] = true;
  11529. break;
  11530. }
  11531. at_rule_start = true;
  11532. } else {
  11533. at_rule_start = false;
  11534. }
  11535. }
  11536. // Second, recurse into leftmost nonterminals (or next-leftmost as long as the previous nonterminal may
  11537. // be empty)
  11538. bool recurse_into_nonterminal = true;
  11539. for (size_t i = 0; i < rule.size(); i++) {
  11540. if (rule[i].type == LLAMA_GRETYPE_RULE_REF && recurse_into_nonterminal) {
  11541. if (llama_grammar_detect_left_recursion(rules, (size_t)rule[i].value, rules_visited, rules_in_progress, rules_may_be_empty)) {
  11542. return true;
  11543. }
  11544. if (!((*rules_may_be_empty)[(size_t)rule[i].value])) {
  11545. recurse_into_nonterminal = false;
  11546. }
  11547. } else if (llama_grammar_is_end_of_sequence(&rule[i])) {
  11548. recurse_into_nonterminal = true;
  11549. } else {
  11550. recurse_into_nonterminal = false;
  11551. }
  11552. }
  11553. (*rules_in_progress)[rule_index] = false;
  11554. (*rules_visited)[rule_index] = true;
  11555. return false;
  11556. }
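// Illustrative example: the rule  expr ::= expr "+" term | term  is left-recursive (expr is
// the leftmost symbol of its own first alternate) and is rejected by llama_grammar_init()
// below. The rules_may_be_empty pass matters because in  a ::= b a "x"  the reference to `a`
// becomes effectively leftmost once `b` can derive the empty string.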
  11557. //
  11558. // grammar - external
  11559. //
  11560. struct llama_grammar * llama_grammar_init(
  11561. const llama_grammar_element ** rules,
  11562. size_t n_rules,
  11563. size_t start_rule_index) {
  11564. const llama_grammar_element * pos;
  11565. // copy rule definitions into vectors
  11566. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  11567. for (size_t i = 0; i < n_rules; i++) {
  11568. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  11569. vec_rules[i].push_back(*pos);
  11570. }
  11571. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  11572. }
  11573. // Check for left recursion
  11574. std::vector<bool> rules_visited(n_rules);
  11575. std::vector<bool> rules_in_progress(n_rules);
  11576. std::vector<bool> rules_may_be_empty(n_rules);
  11577. for (size_t i = 0; i < n_rules; i++) {
  11578. if (rules_visited[i]) {
  11579. continue;
  11580. }
  11581. if (llama_grammar_detect_left_recursion(vec_rules, i, &rules_visited, &rules_in_progress, &rules_may_be_empty)) {
  11582. throw std::runtime_error(format("unsupported grammar, left recursion detected for nonterminal at index %zu", i));
  11583. }
  11584. }
  11585. // loop over alternates of start rule to build initial stacks
  11586. std::vector<std::vector<const llama_grammar_element *>> stacks;
  11587. pos = vec_rules[start_rule_index].data();
  11588. do {
  11589. std::vector<const llama_grammar_element *> stack;
  11590. if (!llama_grammar_is_end_of_sequence(pos)) {
  11591. // if alternate is nonempty, add to stack
  11592. stack.push_back(pos);
  11593. }
  11594. llama_grammar_advance_stack(vec_rules, stack, stacks);
  11595. while (!llama_grammar_is_end_of_sequence(pos)) {
  11596. // scan to end of alternate def
  11597. pos++;
  11598. }
  11599. if (pos->type == LLAMA_GRETYPE_ALT) {
  11600. // there's another alternate def of this rule to process
  11601. pos++;
  11602. } else {
  11603. break;
  11604. }
  11605. } while (true);
  11606. // Important: vec_rules has to be moved here, not copied, because stacks contains
  11607. // pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
  11608. // then the pointers would be invalidated when the local vec_rules goes out of scope.
  11609. return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
  11610. }
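// Typical usage (sketch; in the examples the rules are produced by the GBNF parser in
// common/grammar-parser, whose exact API may differ):
//
//   grammar_parser::parse_state parsed = grammar_parser::parse(gbnf_text);
//   std::vector<const llama_grammar_element *> rules = parsed.c_rules();
//   struct llama_grammar * grammar =
//       llama_grammar_init(rules.data(), rules.size(), parsed.symbol_ids.at("root"));
//   // ... sample with llama_sample_grammar() and advance with llama_grammar_accept_token() ...
//   llama_grammar_free(grammar);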
  11611. void llama_grammar_free(struct llama_grammar * grammar) {
  11612. delete grammar;
  11613. }
  11614. struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
  11615. llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };
  11616. // redirect elements in stacks to point to new rules
  11617. for (size_t is = 0; is < result->stacks.size(); is++) {
  11618. for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
  11619. for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
  11620. for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
  11621. if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
  11622. result->stacks[is][ie] = &result->rules[ir0][ir1];
  11623. }
  11624. }
  11625. }
  11626. }
  11627. }
  11628. return result;
  11629. }
  11630. //
  11631. // sampling
  11632. //
  11633. void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
  11634. if (seed == LLAMA_DEFAULT_SEED) {
  11635. seed = time(NULL);
  11636. }
  11637. ctx->rng.seed(seed);
  11638. }
  11639. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  11640. GGML_ASSERT(candidates->size > 0);
  11641. const int64_t t_start_sample_us = ggml_time_us();
  11642. // Sort the logits in descending order
  11643. if (!candidates->sorted) {
  11644. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  11645. return a.logit > b.logit;
  11646. });
  11647. candidates->sorted = true;
  11648. }
  11649. float max_l = candidates->data[0].logit;
  11650. float cum_sum = 0.0f;
  11651. for (size_t i = 0; i < candidates->size; ++i) {
  11652. float p = expf(candidates->data[i].logit - max_l);
  11653. candidates->data[i].p = p;
  11654. cum_sum += p;
  11655. }
  11656. for (size_t i = 0; i < candidates->size; ++i) {
  11657. candidates->data[i].p /= cum_sum;
  11658. }
  11659. if (ctx) {
  11660. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  11661. }
  11662. }
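// Note: subtracting the max logit before exponentiation is the usual trick for numerical
// stability; the result is still the standard softmax
//   p_i = exp(l_i - l_max) / sum_j exp(l_j - l_max) = exp(l_i) / sum_j exp(l_j)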
  11663. void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int32_t k, size_t min_keep) {
  11664. // TODO: move bucket sort to separate function so that top_p/tail_free/typical/softmax first is equally fast
  11665. // if (k >= (int32_t)candidates->size) {
  11666. // return;
  11667. // }
  11668. const int64_t t_start_sample_us = ggml_time_us();
  11669. if (k <= 0) {
  11670. k = candidates->size;
  11671. }
  11672. k = std::max(k, (int) min_keep);
  11673. k = std::min(k, (int) candidates->size);
  11674. // Sort scores in descending order
  11675. if (!candidates->sorted) {
  11676. auto comp = [](const llama_token_data & a, const llama_token_data & b) {
  11677. return a.logit > b.logit;
  11678. };
  11679. if (k <= 128) {
  11680. std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
  11681. } else {
  11682. constexpr int nbuckets = 128;
  11683. constexpr float bucket_low = -10.0f;
  11684. constexpr float bucket_high = 10.0f;
  11685. constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low);
11686. constexpr float bucket_inter = -bucket_low * bucket_scale;
11687. std::vector<int> bucket_idx(candidates->size);
11688. std::vector<int> histo(nbuckets, 0);
11689. for (int i = 0; i < (int)candidates->size; ++i) {
11690. const float val = candidates->data[i].logit;
11691. int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
  11692. ib = std::max(0, std::min(nbuckets-1, ib));
  11693. bucket_idx[i] = ib;
  11694. ++histo[ib];
  11695. }
  11696. int nhave = 0;
  11697. int ib = nbuckets - 1;
  11698. for ( ; ib >= 0; --ib) {
  11699. nhave += histo[ib];
  11700. if (nhave >= k) break;
  11701. }
  11702. std::vector<llama_token_data> tmp_tokens(nhave);
  11703. auto ptr = tmp_tokens.data();
  11704. std::vector<llama_token_data*> bucket_ptrs;
  11705. bucket_ptrs.reserve(nbuckets - ib);
  11706. for (int j = nbuckets - 1; j >= ib; --j) {
  11707. bucket_ptrs.push_back(ptr);
  11708. ptr += histo[j];
  11709. }
  11710. for (int i = 0; i < (int)candidates->size; ++i) {
  11711. int j = bucket_idx[i];
  11712. if (j >= ib) {
  11713. *bucket_ptrs[nbuckets-1-j]++ = candidates->data[i];
  11714. }
  11715. }
  11716. ptr = tmp_tokens.data();
  11717. int ndone = 0;
  11718. for (int j = nbuckets-1; j > ib; --j) {
  11719. std::sort(ptr, ptr + histo[j], comp);
  11720. ptr += histo[j];
  11721. ndone += histo[j];
  11722. }
  11723. std::partial_sort(ptr, ptr + k - ndone, ptr + histo[ib], comp);
  11724. std::memcpy(candidates->data, tmp_tokens.data(), k*sizeof(llama_token_data));
  11725. }
  11726. candidates->sorted = true;
  11727. }
  11728. candidates->size = k;
  11729. if (ctx) {
  11730. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  11731. }
  11732. }
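// Note on the bucket path above: logits are binned into 128 buckets spanning [-10, 10];
// buckets are consumed from the highest downwards until at least k candidates are collected,
// and only those roughly k entries are sorted. That keeps the cost close to linear in
// candidates->size instead of a full O(n log n) sort.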
  11733. void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  11734. if (p >= 1.0f) {
  11735. return;
  11736. }
  11737. llama_sample_softmax(ctx, candidates);
  11738. const int64_t t_start_sample_us = ggml_time_us();
  11739. // Compute the cumulative probabilities
  11740. float cum_sum = 0.0f;
  11741. size_t last_idx = candidates->size;
  11742. for (size_t i = 0; i < candidates->size; ++i) {
  11743. cum_sum += candidates->data[i].p;
  11744. // Check if the running sum is at least p or if we have kept at least min_keep tokens
  11745. // we set the last index to i+1 to indicate that the current iterate should be included in the set
  11746. if (cum_sum >= p && i + 1 >= min_keep) {
  11747. last_idx = i + 1;
  11748. break;
  11749. }
  11750. }
  11751. // Resize the output vector to keep only the top-p tokens
  11752. candidates->size = last_idx;
  11753. if (ctx) {
  11754. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  11755. }
  11756. }
  11757. void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  11758. if (p <= 0.0f || !candidates->size) {
  11759. return;
  11760. }
  11761. const int64_t t_start_sample_us = ggml_time_us();
  11762. bool min_p_applied = false;
  11763. // if the candidates aren't sorted, try the unsorted implementation first
  11764. if (!candidates->sorted) {
  11765. std::vector<llama_token_data> filtered_tokens;
  11766. float max_logit = -FLT_MAX;
  11767. for (size_t i = 0; i < candidates->size; ++i) {
  11768. max_logit = std::max(max_logit, candidates->data[i].logit);
  11769. }
  11770. const float min_logit = max_logit + logf(p); // min logit for p_i >= p * p_max
  11771. for (size_t i = 0; i < candidates->size; ++i) {
  11772. if (candidates->data[i].logit >= min_logit) {
  11773. filtered_tokens.push_back(candidates->data[i]);
  11774. }
  11775. }
  11776. // if we have enough values the operation was a success
  11777. if (filtered_tokens.size() >= min_keep) {
  11778. memcpy(candidates->data, filtered_tokens.data(), filtered_tokens.size()*sizeof(llama_token_data));
  11779. candidates->size = filtered_tokens.size();
  11780. min_p_applied = true;
  11781. }
  11782. }
  11783. // if the candidates are sorted or the unsorted implementation failed, use this implementation
  11784. if (!min_p_applied) {
  11785. // Sort the logits in descending order
  11786. if (!candidates->sorted) {
  11787. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  11788. return a.logit > b.logit;
  11789. });
  11790. candidates->sorted = true;
  11791. }
  11792. const float min_logit = candidates->data[0].logit + logf(p); // min logit for p_i >= p * p_max
  11793. size_t i = 1; // first token always matches
  11794. for (; i < candidates->size; ++i) {
  11795. if (candidates->data[i].logit < min_logit && i >= min_keep) {
  11796. break; // prob too small
  11797. }
  11798. }
  11799. // Resize the output vector to keep only the matching tokens
  11800. candidates->size = i;
  11801. }
  11802. if (ctx) {
  11803. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  11804. }
  11805. }
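// Note: the logit threshold used above comes from working in log space, where the softmax
// denominator cancels:
//   p_i >= p * p_max  <=>  logit_i >= logit_max + log(p)
// so min-p can be applied without computing the normalized probabilities first.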
  11806. void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
  11807. if (z >= 1.0f || candidates->size <= 2) {
  11808. return;
  11809. }
  11810. llama_sample_softmax(nullptr, candidates);
  11811. const int64_t t_start_sample_us = ggml_time_us();
  11812. // Compute the first and second derivatives
  11813. std::vector<float> first_derivatives(candidates->size - 1);
  11814. std::vector<float> second_derivatives(candidates->size - 2);
  11815. for (size_t i = 0; i < first_derivatives.size(); ++i) {
  11816. first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
  11817. }
  11818. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  11819. second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
  11820. }
  11821. // Calculate absolute value of second derivatives
  11822. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  11823. second_derivatives[i] = std::abs(second_derivatives[i]);
  11824. }
  11825. // Normalize the second derivatives
  11826. {
  11827. const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
  11828. if (second_derivatives_sum > 1e-6f) {
  11829. for (float & value : second_derivatives) {
  11830. value /= second_derivatives_sum;
  11831. }
  11832. } else {
  11833. for (float & value : second_derivatives) {
  11834. value = 1.0f / second_derivatives.size();
  11835. }
  11836. }
  11837. }
  11838. float cum_sum = 0.0f;
  11839. size_t last_idx = candidates->size;
  11840. for (size_t i = 0; i < second_derivatives.size(); ++i) {
  11841. cum_sum += second_derivatives[i];
  11842. // Check if the running sum is greater than z or if we have kept at least min_keep tokens
  11843. if (cum_sum > z && i >= min_keep) {
  11844. last_idx = i;
  11845. break;
  11846. }
  11847. }
  11848. // Resize the output vector to keep only the tokens above the tail location
  11849. candidates->size = last_idx;
  11850. if (ctx) {
  11851. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  11852. }
  11853. }
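// Note on the method above (Tail Free Sampling): the sorted probabilities are treated as a
// curve, the absolute second differences are normalized to sum to 1, and tokens are kept
// until that cumulative "curvature" exceeds z, a heuristic for where the flat tail of the
// distribution begins.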
  11854. void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
  11855. // Reference implementation:
  11856. // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
  11857. if (p >= 1.0f) {
  11858. return;
  11859. }
  11860. // Compute the softmax of logits and calculate entropy
  11861. llama_sample_softmax(nullptr, candidates);
  11862. const int64_t t_start_sample_us = ggml_time_us();
  11863. float entropy = 0.0f;
  11864. for (size_t i = 0; i < candidates->size; ++i) {
  11865. entropy += -candidates->data[i].p * logf(candidates->data[i].p);
  11866. }
  11867. // Compute the absolute difference between negative log probability and entropy for each candidate
  11868. std::vector<float> shifted_scores;
  11869. for (size_t i = 0; i < candidates->size; ++i) {
  11870. float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
  11871. shifted_scores.push_back(shifted_score);
  11872. }
  11873. // Sort tokens based on the shifted_scores and their corresponding indices
  11874. std::vector<size_t> indices(candidates->size);
  11875. std::iota(indices.begin(), indices.end(), 0);
  11876. std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
  11877. return shifted_scores[a] < shifted_scores[b];
  11878. });
  11879. // Compute the cumulative probabilities
  11880. float cum_sum = 0.0f;
  11881. size_t last_idx = indices.size();
  11882. for (size_t i = 0; i < indices.size(); ++i) {
  11883. size_t idx = indices[i];
  11884. cum_sum += candidates->data[idx].p;
  11885. // Check if the running sum is greater than typical or if we have kept at least min_keep tokens
  11886. if (cum_sum > p && i >= min_keep - 1) {
  11887. last_idx = i + 1;
  11888. break;
  11889. }
  11890. }
  11891. // Resize the output vector to keep only the locally typical tokens
  11892. std::vector<llama_token_data> new_candidates;
  11893. for (size_t i = 0; i < last_idx; ++i) {
  11894. size_t idx = indices[i];
  11895. new_candidates.push_back(candidates->data[idx]);
  11896. }
  11897. // Replace the data in candidates with the new_candidates data
  11898. std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
  11899. candidates->size = new_candidates.size();
  11900. candidates->sorted = false;
  11901. if (ctx) {
  11902. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  11903. }
  11904. }
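// Note on the method above (locally typical sampling): tokens are ranked by
// |(-log p_i) - H|, i.e. how close their information content is to the entropy H of the
// whole distribution, and the most typical tokens are kept until their cumulative
// probability exceeds p.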
  11905. void llama_sample_entropy(struct llama_context * ctx, llama_token_data_array * candidates_p, float min_temp, float max_temp, float exponent_val) {
  11906. const int64_t t_start_sample_us = ggml_time_us();
11907. // no need to do anything if there is at most one candidate
11908. if (candidates_p->size <= 1) {
  11909. return;
  11910. }
  11911. // Calculate maximum possible entropy
  11912. float max_entropy = -logf(1.0f / candidates_p->size);
  11913. llama_sample_softmax(nullptr, candidates_p);
  11914. // Calculate entropy of the softmax probabilities
  11915. float entropy = 0.0f;
  11916. for (size_t i = 0; i < candidates_p->size; ++i) {
  11917. float prob = candidates_p->data[i].p;
  11918. if (prob > 0.0f) { // Ensure no log(0)
  11919. entropy -= prob * logf(prob);
  11920. }
  11921. }
  11922. // Normalize the entropy (max_entropy cannot be 0 here because we checked candidates_p->size != 1 above)
  11923. float normalized_entropy = entropy / max_entropy;
  11924. // Map the normalized entropy to the desired temperature range using the power function
  11925. float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val);
  11926. #ifdef DEBUG
  11927. LLAMA_LOG_INFO("Your text maxtemp value is: %f\n", max_temp);
  11928. LLAMA_LOG_INFO("Entropy: %f\n", entropy);
  11929. LLAMA_LOG_INFO("Max Possible Entropy: %f\n", max_entropy);
  11930. LLAMA_LOG_INFO("Normalized Entropy: %f\n", normalized_entropy);
  11931. LLAMA_LOG_INFO("Exponent: %f\n", exponent_val);
  11932. LLAMA_LOG_INFO("Dynamic Temperature (dyn_temp): %f\n", dyn_temp);
  11933. #endif
  11934. // Apply the dynamically calculated temperature scaling
  11935. for (size_t i = 0; i < candidates_p->size; ++i) {
  11936. candidates_p->data[i].logit /= dyn_temp;
  11937. }
  11938. // Re-compute softmax probabilities after scaling logits with dynamic temperature
  11939. double max_l_double = candidates_p->data[0].logit;
  11940. double cum_sum_double = 0.0;
  11941. for (size_t i = 0; i < candidates_p->size; ++i) {
  11942. double p = exp(candidates_p->data[i].logit - max_l_double);
  11943. candidates_p->data[i].p = p; // Store the scaled probability
  11944. cum_sum_double += p;
  11945. }
  11946. for (size_t i = 0; i < candidates_p->size; ++i) {
  11947. candidates_p->data[i].p /= cum_sum_double; // Re-normalize the probabilities
  11948. }
  11949. #ifdef DEBUG
  11950. // Print the updated top 25 probabilities after temperature scaling
  11951. LLAMA_LOG_INFO("\nUpdated Top 25 Probabilities After Dynamic Temperature Scaling (in percentages):\n");
  11952. for (size_t i = 0; i < 25 && i < candidates_p->size; ++i) {
  11953. LLAMA_LOG_INFO("Token %zu: %f%%\n", i + 1, candidates_p->data[i].p * 100.0f);
  11954. }
  11955. #endif
  11956. if (ctx) {
  11957. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  11958. }
  11959. }
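//
// Worked example of the entropy-to-temperature mapping above (numbers are illustrative): with 4
// candidates at probabilities {0.7, 0.1, 0.1, 0.1}, entropy ~= 0.94 nats and max_entropy = ln(4)
// ~= 1.386, so normalized_entropy ~= 0.68. With min_temp = 0.5, max_temp = 1.5 and
// exponent_val = 1.0, dyn_temp = 0.5 + 1.0 * 0.68 ~= 1.18: flatter (higher-entropy) distributions
// are sampled hotter, while peaked ones stay near min_temp.
//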
  11960. void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
  11961. const int64_t t_start_sample_us = ggml_time_us();
  11962. for (size_t i = 0; i < candidates_p->size; ++i) {
  11963. candidates_p->data[i].logit /= temp;
  11964. }
  11965. if (ctx) {
  11966. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  11967. }
  11968. }
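//
// Example of the effect of the scaling above (illustrative): dividing logits by temp < 1 sharpens
// the distribution. For two tokens whose logits differ by 1.0, the probability ratio is
// e^1.0 ~= 2.72; at temp = 0.5 the gap becomes 2.0 and the ratio grows to e^2.0 ~= 7.39.
// temp > 1 has the opposite, flattening effect.
//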
  11969. void llama_sample_repetition_penalties(
  11970. struct llama_context * ctx,
  11971. llama_token_data_array * candidates,
  11972. const llama_token * last_tokens,
  11973. size_t penalty_last_n,
  11974. float penalty_repeat,
  11975. float penalty_freq,
  11976. float penalty_present) {
  11977. if (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)) {
  11978. return;
  11979. }
  11980. const int64_t t_start_sample_us = ggml_time_us();
  11981. // Create a frequency map to count occurrences of each token in last_tokens
  11982. std::unordered_map<llama_token, int> token_count;
  11983. for (size_t i = 0; i < penalty_last_n; ++i) {
  11984. token_count[last_tokens[i]]++;
  11985. }
  11986. // Apply frequency and presence penalties to the candidates
  11987. for (size_t i = 0; i < candidates->size; ++i) {
  11988. const auto token_iter = token_count.find(candidates->data[i].id);
  11989. if (token_iter == token_count.end()) {
  11990. continue;
  11991. }
  11992. const int count = token_iter->second;
11993. // The academic publication that described this technique only divided by the penalty, but that would make tokens with negative logits more likely, which is obviously wrong.
11994. // The common fix is to multiply negative logits by the penalty instead of dividing.
  11995. if (candidates->data[i].logit <= 0) {
  11996. candidates->data[i].logit *= penalty_repeat;
  11997. } else {
  11998. candidates->data[i].logit /= penalty_repeat;
  11999. }
  12000. candidates->data[i].logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;
  12001. }
  12002. candidates->sorted = false;
  12003. if (ctx) {
  12004. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12005. }
  12006. }
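//
// Worked example of the penalties above (numbers are illustrative): with penalty_repeat = 1.1,
// penalty_freq = 0.1 and penalty_present = 0.2, a token seen 3 times in the last_tokens window
// with logit 2.0 becomes 2.0 / 1.1 - (3 * 0.1 + 0.2) ~= 1.32, while one with logit -2.0 becomes
// -2.0 * 1.1 - 0.5 = -2.7; tokens absent from the window are left untouched.
//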
  12007. void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
  12008. GGML_ASSERT(ctx);
  12009. int64_t t_start_sample_us = ggml_time_us();
  12010. bool allow_eog = false;
  12011. for (const auto & stack : grammar->stacks) {
  12012. if (stack.empty()) {
  12013. allow_eog = true;
  12014. break;
  12015. }
  12016. }
  12017. std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
  12018. candidates_decoded.reserve(candidates->size);
  12019. std::vector<llama_grammar_candidate> candidates_grammar;
  12020. candidates_grammar.reserve(candidates->size);
  12021. for (size_t i = 0; i < candidates->size; ++i) {
  12022. const llama_token id = candidates->data[i].id;
  12023. const std::string & piece = ctx->model.vocab.cache_token_to_piece.at(id);
  12024. if (llama_token_is_eog(&ctx->model, id)) {
  12025. if (!allow_eog) {
  12026. candidates->data[i].logit = -INFINITY;
  12027. }
  12028. } else if (piece.empty() || piece[0] == 0) {
  12029. candidates->data[i].logit = -INFINITY;
  12030. } else {
  12031. candidates_decoded.push_back(decode_utf8(piece, grammar->partial_utf8));
  12032. candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
  12033. }
  12034. }
  12035. const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
  12036. for (const auto & reject : rejects) {
  12037. candidates->data[reject.index].logit = -INFINITY;
  12038. }
  12039. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12040. }
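//
// Usage sketch (illustrative; mirrors the pattern used by the common sampling code): the grammar
// filter runs on the candidates before each draw and the chosen token is then committed back to
// the grammar state, so tokens that cannot extend any active parse stack are masked to -INFINITY
// here and the stacks are advanced with what was actually emitted:
//
//   llama_sample_grammar(ctx, &cur_p, grammar);
//   llama_token tok = llama_sample_token(ctx, &cur_p);
//   llama_grammar_accept_token(ctx, grammar, tok);
//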
  12041. static void llama_log_softmax(float * array, size_t size) {
  12042. float max_l = *std::max_element(array, array + size);
  12043. float sum = 0.f;
  12044. for (size_t i = 0; i < size; ++i) {
  12045. float p = expf(array[i] - max_l);
  12046. sum += p;
  12047. array[i] = p;
  12048. }
  12049. for (size_t i = 0; i < size; ++i) {
  12050. array[i] = logf(array[i] / sum);
  12051. }
  12052. }
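//
// Equivalently, array[i] becomes array[i] - max_l - log(sum_j exp(array[j] - max_l)), i.e. a
// numerically stable log-softmax; it is used below by llama_sample_apply_guidance to compare
// log-probabilities between the main and guidance logits.
//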
  12053. void llama_sample_apply_guidance(
  12054. struct llama_context * ctx,
  12055. float * logits,
  12056. float * logits_guidance,
  12057. float scale) {
  12058. GGML_ASSERT(ctx);
  12059. const auto t_start_sample_us = ggml_time_us();
  12060. const auto n_vocab = llama_n_vocab(llama_get_model(ctx));
  12061. llama_log_softmax(logits, n_vocab);
  12062. llama_log_softmax(logits_guidance, n_vocab);
  12063. for (int i = 0; i < n_vocab; ++i) {
  12064. auto & l = logits[i];
  12065. const auto & g = logits_guidance[i];
  12066. l = scale * (l - g) + g;
  12067. }
  12068. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12069. }
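//
// Worked example of the combination above (numbers are illustrative): for a token with main-model
// log-probability l = -1.0 and guidance (negative-prompt) log-probability g = -3.0, scale = 2.0
// gives 2.0 * (-1.0 - (-3.0)) + (-3.0) = 1.0, boosting tokens the main context favors much more
// than the guidance context; scale = 1.0 reduces to the plain main-model logits.
//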
  12070. llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int32_t m, float * mu) {
  12071. GGML_ASSERT(ctx);
  12072. auto N = float(llama_n_vocab(llama_get_model(ctx)));
  12073. int64_t t_start_sample_us;
  12074. t_start_sample_us = ggml_time_us();
  12075. llama_sample_softmax(nullptr, candidates);
  12076. // Estimate s_hat using the most probable m tokens
  12077. float s_hat = 0.0;
  12078. float sum_ti_bi = 0.0;
  12079. float sum_ti_sq = 0.0;
  12080. for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
  12081. float t_i = logf(float(i + 2) / float(i + 1));
  12082. float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
  12083. sum_ti_bi += t_i * b_i;
  12084. sum_ti_sq += t_i * t_i;
  12085. }
  12086. s_hat = sum_ti_bi / sum_ti_sq;
  12087. // Compute k from the estimated s_hat and target surprise value
  12088. float epsilon_hat = s_hat - 1;
  12089. float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
  12090. // Sample the next word X using top-k sampling
  12091. llama_sample_top_k(nullptr, candidates, int(k), 1);
  12092. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12093. llama_token X = llama_sample_token(ctx, candidates);
  12094. t_start_sample_us = ggml_time_us();
  12095. // Compute error as the difference between observed surprise and target surprise value
  12096. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  12097. return candidate.id == X;
  12098. }));
  12099. float observed_surprise = -log2f(candidates->data[X_idx].p);
  12100. float e = observed_surprise - tau;
  12101. // Update mu using the learning rate and error
  12102. *mu = *mu - eta * e;
  12103. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12104. return X;
  12105. }
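//
// Usage sketch (illustrative; mirrors the common sampling defaults): mu is persistent state
// carried across steps and is conventionally initialized to 2 * tau:
//
//   float mirostat_mu = 2.0f * tau;
//   llama_token tok = llama_sample_token_mirostat(ctx, &cur_p, tau, eta, /*m=*/100, &mirostat_mu);
//
// Each step the surprise error (observed - tau) nudges mu, which in turn sets the top-k cutoff
// used for the next draw.
//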
  12106. llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
  12107. int64_t t_start_sample_us;
  12108. t_start_sample_us = ggml_time_us();
  12109. llama_sample_softmax(ctx, candidates);
  12110. // Truncate the words with surprise values greater than mu
  12111. candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  12112. return -log2f(candidate.p) > *mu;
  12113. }));
  12114. if (candidates->size == 0) {
  12115. candidates->size = 1;
  12116. }
  12117. if (ctx) {
  12118. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12119. }
  12120. // Normalize the probabilities of the remaining words
  12121. llama_sample_softmax(ctx, candidates);
  12122. // Sample the next word X from the remaining words
  12123. llama_token X = llama_sample_token(ctx, candidates);
  12124. t_start_sample_us = ggml_time_us();
  12125. // Compute error as the difference between observed surprise and target surprise value
  12126. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  12127. return candidate.id == X;
  12128. }));
  12129. float observed_surprise = -log2f(candidates->data[X_idx].p);
  12130. float e = observed_surprise - tau;
  12131. // Update mu using the learning rate and error
  12132. *mu = *mu - eta * e;
  12133. if (ctx) {
  12134. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12135. }
  12136. return X;
  12137. }
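//
// Example of the truncation above (illustrative): with *mu = 5.0, any candidate with
// p < 2^-5 ~= 3.1% is dropped before the draw. If the sampled token turns out more surprising
// than tau, mu shrinks by eta * error and the cutoff tightens on the next step, and vice versa.
//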
  12138. llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
  12139. const int64_t t_start_sample_us = ggml_time_us();
  12140. // Find max element
  12141. auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  12142. return a.logit < b.logit;
  12143. });
  12144. llama_token result = max_iter->id;
  12145. if (ctx) {
  12146. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12147. ctx->n_sample++;
  12148. }
  12149. return result;
  12150. }
  12151. llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng) {
  12152. GGML_ASSERT(ctx);
  12153. const int64_t t_start_sample_us = ggml_time_us();
  12154. llama_sample_softmax(nullptr, candidates);
  12155. std::vector<float> probs;
  12156. probs.reserve(candidates->size);
  12157. for (size_t i = 0; i < candidates->size; ++i) {
  12158. probs.push_back(candidates->data[i].p);
  12159. }
  12160. std::discrete_distribution<> dist(probs.begin(), probs.end());
  12161. int idx = dist(rng);
  12162. llama_token result = candidates->data[idx].id;
  12163. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12164. ctx->n_sample++;
  12165. return result;
  12166. }
  12167. llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
  12168. return llama_sample_token_with_rng(ctx, candidates, ctx->rng);
  12169. }
  12170. void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
  12171. const int64_t t_start_sample_us = ggml_time_us();
  12172. if (llama_token_is_eog(&ctx->model, token)) {
  12173. for (const auto & stack : grammar->stacks) {
  12174. if (stack.empty()) {
  12175. return;
  12176. }
  12177. }
  12178. GGML_ASSERT(false);
  12179. }
  12180. const std::string & piece = ctx->model.vocab.cache_token_to_piece.at(token);
  12181. // Note terminating 0 in decoded string
  12182. const auto decoded = decode_utf8(piece, grammar->partial_utf8);
  12183. const auto & code_points = decoded.first;
  12184. std::vector<std::vector<const llama_grammar_element *>> tmp_new_stacks;
  12185. for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
  12186. llama_grammar_accept(grammar->rules, grammar->stacks, *it, tmp_new_stacks);
  12187. grammar->stacks = tmp_new_stacks;
  12188. }
  12189. grammar->partial_utf8 = decoded.second;
  12190. GGML_ASSERT(!grammar->stacks.empty());
  12191. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  12192. }
  12193. //
  12194. // quantization
  12195. //
  12196. struct quantize_state_internal {
  12197. const llama_model & model;
  12198. const llama_model_quantize_params * params;
  12199. int n_attention_wv = 0;
  12200. int n_ffn_down = 0;
  12201. int n_ffn_gate = 0;
  12202. int n_ffn_up = 0;
  12203. int i_attention_wv = 0;
  12204. int i_ffn_down = 0;
  12205. int i_ffn_gate = 0;
  12206. int i_ffn_up = 0;
  12207. int n_k_quantized = 0;
  12208. int n_fallback = 0;
  12209. bool has_imatrix = false;
  12210. // used to figure out if a model shares tok_embd with the output weight
  12211. bool has_output = false;
  12212. quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
  12213. : model(model)
  12214. , params(params)
  12215. {}
  12216. };
  12217. static void llama_tensor_dequantize_internal(
  12218. struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
  12219. const size_t nelements, const int nthread
  12220. ) {
  12221. if (output.size() < nelements) {
  12222. output.resize(nelements);
  12223. }
  12224. float * f32_output = (float *) output.data();
  12225. ggml_type_traits_t qtype;
  12226. if (ggml_is_quantized(tensor->type)) {
  12227. qtype = ggml_internal_get_type_traits(tensor->type);
  12228. if (qtype.to_float == NULL) {
  12229. throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
  12230. }
  12231. } else if (tensor->type != GGML_TYPE_F16 &&
  12232. tensor->type != GGML_TYPE_BF16) {
  12233. throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
  12234. }
  12235. if (nthread < 2) {
  12236. if (tensor->type == GGML_TYPE_F16) {
  12237. ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
  12238. } else if (tensor->type == GGML_TYPE_BF16) {
  12239. ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements);
  12240. } else if (ggml_is_quantized(tensor->type)) {
  12241. qtype.to_float(tensor->data, f32_output, nelements);
  12242. } else {
  12243. GGML_ASSERT(false); // unreachable
  12244. }
  12245. return;
  12246. }
  12247. size_t block_size;
  12248. if (tensor->type == GGML_TYPE_F16 ||
  12249. tensor->type == GGML_TYPE_BF16) {
  12250. block_size = 1;
  12251. } else {
  12252. block_size = (size_t)ggml_blck_size(tensor->type);
  12253. }
  12254. size_t block_size_bytes = ggml_type_size(tensor->type);
  12255. GGML_ASSERT(nelements % block_size == 0);
  12256. size_t nblocks = nelements / block_size;
  12257. size_t blocks_per_thread = nblocks / nthread;
  12258. size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
  12259. size_t in_buff_offs = 0;
  12260. size_t out_buff_offs = 0;
  12261. for (int tnum = 0; tnum < nthread; tnum++) {
  12262. size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
  12263. size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
  12264. size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
  12265. auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
  12266. if (typ == GGML_TYPE_F16) {
  12267. ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
  12268. } else if (typ == GGML_TYPE_BF16) {
  12269. ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels);
  12270. } else {
  12271. qtype.to_float(inbuf, outbuf, nels);
  12272. }
  12273. };
  12274. workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
  12275. in_buff_offs += thr_block_bytes;
  12276. out_buff_offs += thr_elems;
  12277. }
  12278. for (auto & w : workers) { w.join(); }
  12279. workers.clear();
  12280. }
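//
// Worked example of the work split above (illustrative, default QK_K = 256): dequantizing a Q4_K
// tensor with nelements = 1000192 and nthread = 4 gives block_size = 256, nblocks = 3907,
// blocks_per_thread = 976 and spare_blocks = 3, so threads 0-2 convert 976 blocks each and the
// last thread takes 979; input offsets advance by thr_blocks * ggml_type_size(type) bytes and
// output offsets by thr_blocks * block_size floats.
//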
  12281. static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
  12282. const std::string name = ggml_get_name(tensor);
  12283. // TODO: avoid hardcoded tensor names - use the TN_* constants
  12284. const llm_arch arch = qs.model.arch;
  12285. const auto tn = LLM_TN(arch);
  12286. auto use_more_bits = [](int i_layer, int num_layers) -> bool {
  12287. return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
  12288. };
  12289. const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
  12290. auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
  12291. if (n_expert > 1) {
12292. // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
  12293. // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
  12294. // for getting the current layer as I initially thought, and we need to resort to parsing the
  12295. // tensor name.
  12296. if (sscanf(name, "blk.%d.", &i_layer) != 1) {
  12297. throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
  12298. }
  12299. if (i_layer < 0 || i_layer >= n_layer) {
  12300. throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
  12301. }
  12302. }
  12303. return std::make_pair(i_layer, n_layer);
  12304. };
  12305. // for arches that share the same tensor between the token embeddings and the output, we quantize the token embeddings
  12306. // with the quantization of the output tensor
  12307. if (name == tn(LLM_TENSOR_OUTPUT, "weight") || (!qs.has_output && name == tn(LLM_TENSOR_TOKEN_EMBD, "weight"))) {
  12308. if (qs.params->output_tensor_type < GGML_TYPE_COUNT) {
  12309. new_type = qs.params->output_tensor_type;
  12310. } else {
  12311. int nx = tensor->ne[0];
  12312. if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
  12313. new_type = GGML_TYPE_Q8_0;
  12314. }
  12315. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
  12316. ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ||
  12317. ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
  12318. new_type = GGML_TYPE_Q5_K;
  12319. }
  12320. else if (new_type != GGML_TYPE_Q8_0) {
  12321. new_type = GGML_TYPE_Q6_K;
  12322. }
  12323. }
  12324. } else if (name == "token_embd.weight") {
  12325. if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
  12326. new_type = qs.params->token_embedding_type;
  12327. } else {
  12328. if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
  12329. ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
  12330. new_type = GGML_TYPE_Q2_K;
  12331. }
  12332. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
  12333. new_type = GGML_TYPE_IQ3_S;
  12334. }
  12335. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
  12336. new_type = GGML_TYPE_IQ3_S;
  12337. }
  12338. }
  12339. } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
  12340. ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
  12341. if (name.find("attn_v.weight") != std::string::npos) {
  12342. if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
  12343. else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
  12344. ++qs.i_attention_wv;
  12345. }
  12346. else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
  12347. new_type = GGML_TYPE_Q4_K;
  12348. }
  12349. else if (name.find("ffn_down") != std::string::npos) {
  12350. if (qs.i_ffn_down < qs.n_ffn_down/8) {
  12351. new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
  12352. }
  12353. ++qs.i_ffn_down;
  12354. }
  12355. else if (name.find("attn_output.weight") != std::string::npos) {
  12356. if (qs.model.hparams.n_expert == 8) {
  12357. new_type = GGML_TYPE_Q5_K;
  12358. } else {
  12359. if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
  12360. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
  12361. }
  12362. }
  12363. } else if (name.find("attn_v.weight") != std::string::npos) {
  12364. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
  12365. new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
  12366. }
  12367. else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
  12368. new_type = GGML_TYPE_Q4_K;
  12369. }
  12370. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
  12371. new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
  12372. }
  12373. else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 4) {
  12374. new_type = GGML_TYPE_Q4_K;
  12375. }
  12376. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
  12377. new_type = GGML_TYPE_Q4_K;
  12378. }
  12379. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  12380. new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  12381. }
  12382. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
  12383. else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
  12384. new_type = GGML_TYPE_Q5_K;
  12385. }
  12386. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
  12387. use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
  12388. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
  12389. if (qs.model.type == MODEL_70B) {
  12390. // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
  12391. // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
  12392. // nearly negligible increase in model size by quantizing this tensor with more bits:
  12393. if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
  12394. }
  12395. if (qs.model.hparams.n_expert == 8) {
12396. // for the 8-expert model, bumping this to Q8_0 costs only ~128MB of extra size
  12397. // TODO: explore better strategies
  12398. new_type = GGML_TYPE_Q8_0;
  12399. }
  12400. ++qs.i_attention_wv;
  12401. } else if (name.find("attn_k.weight") != std::string::npos) {
  12402. if (qs.model.hparams.n_expert == 8) {
12403. // for the 8-expert model, bumping this to Q8_0 costs only ~128MB of extra size
  12404. // TODO: explore better strategies
  12405. new_type = GGML_TYPE_Q8_0;
  12406. }
  12407. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
  12408. new_type = GGML_TYPE_IQ3_XXS;
  12409. }
  12410. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
  12411. new_type = GGML_TYPE_IQ2_S;
  12412. }
  12413. } else if (name.find("attn_q.weight") != std::string::npos) {
  12414. if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
  12415. new_type = GGML_TYPE_IQ3_XXS;
  12416. }
  12417. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
  12418. new_type = GGML_TYPE_IQ2_S;
  12419. }
  12420. } else if (name.find("ffn_down") != std::string::npos) {
  12421. auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
  12422. int i_layer = info.first, n_layer = info.second;
  12423. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  12424. else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
  12425. if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
  12426. }
  12427. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
  12428. new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
  12429. }
  12430. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
  12431. new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
  12432. : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
  12433. : GGML_TYPE_Q3_K;
  12434. }
  12435. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
  12436. (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
  12437. new_type = GGML_TYPE_Q4_K;
  12438. }
  12439. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
  12440. new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
  12441. }
  12442. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
  12443. if (arch == LLM_ARCH_FALCON) {
  12444. new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
  12445. use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
  12446. } else {
  12447. if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
  12448. }
  12449. }
  12450. else if (i_layer < n_layer/8 && (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && !qs.has_imatrix) {
  12451. new_type = GGML_TYPE_Q5_K;
  12452. }
  12453. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
  12454. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
  12455. new_type = GGML_TYPE_Q5_K;
  12456. }
  12457. else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
  12458. && qs.has_imatrix && i_layer < n_layer/8) {
  12459. // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
  12460. // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
  12461. // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
  12462. new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
  12463. }
  12464. ++qs.i_ffn_down;
  12465. } else if (name.find("attn_output.weight") != std::string::npos) {
  12466. if (arch != LLM_ARCH_FALCON) {
  12467. if (qs.model.hparams.n_expert == 8) {
  12468. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
  12469. ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
  12470. ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
  12471. ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) {
  12472. new_type = GGML_TYPE_Q5_K;
  12473. }
  12474. } else {
  12475. if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
  12476. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
  12477. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
  12478. else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
  12479. else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M ) new_type = GGML_TYPE_Q4_K;
  12480. }
  12481. } else {
  12482. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
  12483. }
  12484. }
  12485. else if (name.find("attn_qkv.weight") != std::string::npos) {
  12486. if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
  12487. new_type = GGML_TYPE_Q4_K;
  12488. }
  12489. else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
  12490. else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
  12491. }
  12492. else if (name.find("ffn_gate") != std::string::npos) {
  12493. auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
  12494. int i_layer = info.first, n_layer = info.second;
  12495. if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
  12496. new_type = GGML_TYPE_IQ3_XXS;
  12497. }
  12498. ++qs.i_ffn_gate;
  12499. }
  12500. else if (name.find("ffn_up") != std::string::npos) {
  12501. auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
  12502. int i_layer = info.first, n_layer = info.second;
  12503. if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
  12504. new_type = GGML_TYPE_IQ3_XXS;
  12505. }
  12506. ++qs.i_ffn_up;
  12507. }
  12508. // if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  12509. //}
  12510. // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
  12511. //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
  12512. // if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
  12513. //}
  12514. // This can be used to reduce the size of the Q5_K_S model.
  12515. // The associated PPL increase is fully in line with the size reduction
  12516. //else {
  12517. // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
  12518. //}
  12519. bool convert_incompatible_tensor = false;
  12520. if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
  12521. new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K || new_type == GGML_TYPE_IQ4_XS ||
  12522. new_type == GGML_TYPE_IQ2_XS || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S ||
  12523. new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S || new_type == GGML_TYPE_IQ3_S ||
  12524. new_type == GGML_TYPE_IQ1_M) {
  12525. int nx = tensor->ne[0];
  12526. int ny = tensor->ne[1];
  12527. if (nx % QK_K != 0) {
  12528. LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
  12529. convert_incompatible_tensor = true;
  12530. } else {
  12531. ++qs.n_k_quantized;
  12532. }
  12533. }
  12534. if (convert_incompatible_tensor) {
  12535. switch (new_type) {
  12536. case GGML_TYPE_IQ2_XXS:
  12537. case GGML_TYPE_IQ2_XS:
  12538. case GGML_TYPE_IQ2_S:
  12539. case GGML_TYPE_IQ3_XXS:
  12540. case GGML_TYPE_IQ3_S:
  12541. case GGML_TYPE_IQ1_S:
  12542. case GGML_TYPE_IQ1_M:
  12543. case GGML_TYPE_Q2_K:
  12544. case GGML_TYPE_Q3_K:
  12545. case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
  12546. case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
  12547. case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
  12548. case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
  12549. default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
  12550. }
  12551. LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
  12552. ++qs.n_fallback;
  12553. }
  12554. return new_type;
  12555. }
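//
// Example of the use_more_bits() schedule above (illustrative): for a 32-layer model it selects
// layers 0-3, layers 28-31 and every third layer in between starting at 6 (6, 9, ..., 27), i.e.
// the first and last eighth of the network plus a sparse sprinkling of middle layers receive the
// higher-bit quantization type.
//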
  12556. static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
  12557. if (nthread < 2) {
  12558. // single-thread
  12559. size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
  12560. if (!ggml_validate_row_data(new_type, new_data, new_size)) {
  12561. throw std::runtime_error("quantized data validation failed");
  12562. }
  12563. return new_size;
  12564. }
  12565. std::mutex mutex;
  12566. int64_t counter = 0;
  12567. size_t new_size = 0;
  12568. bool valid = true;
  12569. auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size,
  12570. nrows, n_per_row, imatrix]() {
  12571. const int64_t nrows_per_chunk = chunk_size / n_per_row;
  12572. size_t local_size = 0;
  12573. while (true) {
  12574. std::unique_lock<std::mutex> lock(mutex);
  12575. int64_t first_row = counter; counter += nrows_per_chunk;
  12576. if (first_row >= nrows) {
  12577. if (local_size > 0) {
  12578. new_size += local_size;
  12579. }
  12580. break;
  12581. }
  12582. lock.unlock();
  12583. const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
  12584. size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
  12585. local_size += this_size;
  12586. // validate the quantized data
  12587. const size_t row_size = ggml_row_size(new_type, n_per_row);
  12588. void * this_data = (char *) new_data + first_row * row_size;
  12589. if (!ggml_validate_row_data(new_type, this_data, this_size)) {
  12590. std::unique_lock<std::mutex> lock(mutex);
  12591. valid = false;
  12592. break;
  12593. }
  12594. }
  12595. };
  12596. for (int it = 0; it < nthread - 1; ++it) {
  12597. workers.emplace_back(compute);
  12598. }
  12599. compute();
  12600. for (auto & w : workers) { w.join(); }
  12601. workers.clear();
  12602. if (!valid) {
  12603. throw std::runtime_error("quantized data validation failed");
  12604. }
  12605. return new_size;
  12606. }
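//
// Worked example of the chunking above (illustrative): for a tensor with n_per_row = 4096 and
// nrows = 4096, the caller passes chunk_size = 4 * 4096 = 16384, so nrows_per_chunk = 4 and each
// worker repeatedly claims the next 4 rows under the mutex, quantizes them with
// ggml_quantize_chunk, validates the rows it just wrote and folds its local byte count into
// new_size once the shared row counter runs past nrows.
//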
  12607. static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
  12608. ggml_type default_type;
  12609. llama_ftype ftype = params->ftype;
  12610. switch (params->ftype) {
  12611. case LLAMA_FTYPE_MOSTLY_Q4_0: default_type = GGML_TYPE_Q4_0; break;
  12612. case LLAMA_FTYPE_MOSTLY_Q4_1: default_type = GGML_TYPE_Q4_1; break;
  12613. case LLAMA_FTYPE_MOSTLY_Q5_0: default_type = GGML_TYPE_Q5_0; break;
  12614. case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break;
  12615. case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
  12616. case LLAMA_FTYPE_MOSTLY_F16: default_type = GGML_TYPE_F16; break;
  12617. case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
  12618. case LLAMA_FTYPE_ALL_F32: default_type = GGML_TYPE_F32; break;
  12619. // K-quants
  12620. case LLAMA_FTYPE_MOSTLY_Q2_K_S:
  12621. case LLAMA_FTYPE_MOSTLY_Q2_K: default_type = GGML_TYPE_Q2_K; break;
  12622. case LLAMA_FTYPE_MOSTLY_IQ3_XS: default_type = GGML_TYPE_IQ3_S; break;
  12623. case LLAMA_FTYPE_MOSTLY_Q3_K_S:
  12624. case LLAMA_FTYPE_MOSTLY_Q3_K_M:
  12625. case LLAMA_FTYPE_MOSTLY_Q3_K_L: default_type = GGML_TYPE_Q3_K; break;
  12626. case LLAMA_FTYPE_MOSTLY_Q4_K_S:
  12627. case LLAMA_FTYPE_MOSTLY_Q4_K_M: default_type = GGML_TYPE_Q4_K; break;
  12628. case LLAMA_FTYPE_MOSTLY_Q5_K_S:
  12629. case LLAMA_FTYPE_MOSTLY_Q5_K_M: default_type = GGML_TYPE_Q5_K; break;
  12630. case LLAMA_FTYPE_MOSTLY_Q6_K: default_type = GGML_TYPE_Q6_K; break;
  12631. case LLAMA_FTYPE_MOSTLY_IQ2_XXS: default_type = GGML_TYPE_IQ2_XXS; break;
  12632. case LLAMA_FTYPE_MOSTLY_IQ2_XS: default_type = GGML_TYPE_IQ2_XS; break;
  12633. case LLAMA_FTYPE_MOSTLY_IQ2_S: default_type = GGML_TYPE_IQ2_XS; break;
  12634. case LLAMA_FTYPE_MOSTLY_IQ2_M: default_type = GGML_TYPE_IQ2_S; break;
  12635. case LLAMA_FTYPE_MOSTLY_IQ3_XXS: default_type = GGML_TYPE_IQ3_XXS; break;
  12636. case LLAMA_FTYPE_MOSTLY_IQ1_S: default_type = GGML_TYPE_IQ1_S; break;
  12637. case LLAMA_FTYPE_MOSTLY_IQ1_M: default_type = GGML_TYPE_IQ1_M; break;
  12638. case LLAMA_FTYPE_MOSTLY_IQ4_NL: default_type = GGML_TYPE_IQ4_NL; break;
  12639. case LLAMA_FTYPE_MOSTLY_IQ4_XS: default_type = GGML_TYPE_IQ4_XS; break;
  12640. case LLAMA_FTYPE_MOSTLY_IQ3_S: default_type = GGML_TYPE_IQ3_S; break;
  12641. case LLAMA_FTYPE_MOSTLY_IQ3_M: default_type = GGML_TYPE_IQ3_S; break;
  12642. default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
  12643. }
  12644. int nthread = params->nthread;
  12645. if (nthread <= 0) {
  12646. nthread = std::thread::hardware_concurrency();
  12647. }
12648. // mmap consistently increases speed on Linux, and also increases speed on Windows with
  12649. // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
  12650. #if defined(__linux__) || defined(_WIN32)
  12651. constexpr bool use_mmap = true;
  12652. #else
  12653. constexpr bool use_mmap = false;
  12654. #endif
  12655. llama_model_kv_override * kv_overrides = nullptr;
  12656. if (params->kv_overrides) {
  12657. auto v = (std::vector<llama_model_kv_override>*)params->kv_overrides;
  12658. kv_overrides = v->data();
  12659. }
  12660. llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
  12661. ml.init_mappings(false); // no prefetching
  12662. llama_model model;
  12663. llm_load_arch(ml, model);
  12664. llm_load_hparams(ml, model);
  12665. struct quantize_state_internal qs(model, params);
  12666. if (params->only_copy) {
  12667. ftype = model.ftype;
  12668. }
  12669. const std::unordered_map<std::string, std::vector<float>> * imatrix_data = nullptr;
  12670. if (params->imatrix) {
  12671. imatrix_data = static_cast<const std::unordered_map<std::string, std::vector<float>>*>(params->imatrix);
  12672. if (imatrix_data) {
  12673. LLAMA_LOG_INFO("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
  12674. qs.has_imatrix = true;
  12675. }
  12676. }
  12677. const size_t align = GGUF_DEFAULT_ALIGNMENT;
  12678. struct gguf_context * ctx_out = gguf_init_empty();
  12679. // copy the KV pairs from the input file
  12680. gguf_set_kv (ctx_out, ml.meta);
  12681. gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
  12682. gguf_set_val_u32(ctx_out, "general.file_type", ftype);
  12683. // Remove split metadata
  12684. gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
  12685. gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
  12686. gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str());
  12687. if (params->kv_overrides) {
  12688. const std::vector<llama_model_kv_override> & overrides = *(const std::vector<llama_model_kv_override> *)params->kv_overrides;
  12689. for (auto & o : overrides) {
  12690. if (o.key[0] == 0) break;
  12691. if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
  12692. gguf_set_val_f32(ctx_out, o.key, o.val_f64);
  12693. } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
  12694. gguf_set_val_i32(ctx_out, o.key, o.val_i64);
  12695. } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
  12696. gguf_set_val_bool(ctx_out, o.key, o.val_bool);
  12697. } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
  12698. gguf_set_val_str(ctx_out, o.key, o.val_str);
  12699. } else {
  12700. LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
  12701. }
  12702. }
  12703. }
  12704. for (int i = 0; i < ml.n_tensors; ++i) {
  12705. const struct ggml_tensor * meta = ml.get_tensor_meta(i);
  12706. const std::string name = ggml_get_name(meta);
  12707. // TODO: avoid hardcoded tensor names - use the TN_* constants
  12708. if (name.find("attn_v.weight") != std::string::npos ||
  12709. name.find("attn_qkv.weight") != std::string::npos) {
  12710. ++qs.n_attention_wv;
  12711. } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
  12712. qs.has_output = true;
  12713. }
  12714. }
  12715. qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
  12716. // sanity checks
  12717. //
  12718. // - qs.n_attention_wv == 0 for Mamba models
  12719. // - qs.n_attention_wv == model.hparams.n_layer for Transformer models
  12720. //
  12721. GGML_ASSERT((qs.n_attention_wv == 0 || qs.n_attention_wv == (int)model.hparams.n_layer) && "n_attention_wv is unexpected");
  12722. size_t total_size_org = 0;
  12723. size_t total_size_new = 0;
  12724. std::vector<std::thread> workers;
  12725. workers.reserve(nthread);
  12726. int idx = 0;
  12727. std::vector<no_init<uint8_t>> read_data;
  12728. std::vector<no_init<uint8_t>> work;
  12729. std::vector<no_init<float>> f32_conv_buf;
  12730. uint16_t n_split = 1;
  12731. // Assume split index is continuous
  12732. if (params->keep_split) {
  12733. for (int i = 0; i < ml.n_tensors; ++i) {
  12734. n_split = std::max(uint16_t(ml.get_weight(i)->idx+1), n_split);
  12735. }
  12736. }
  12737. std::vector<gguf_context*> ctx_outs(n_split, NULL);
  12738. ctx_outs[0] = ctx_out;
  12739. // populate the original tensors so we get an initial meta data
  12740. for (int i = 0; i < ml.n_tensors; ++i) {
  12741. auto weight = ml.get_weight(i);
  12742. uint16_t i_split = params->keep_split ? weight->idx : 0;
  12743. struct ggml_tensor * tensor = weight->tensor;
  12744. if (ctx_outs[i_split] == NULL) {
  12745. ctx_outs[i_split] = gguf_init_empty();
  12746. }
  12747. gguf_add_tensor(ctx_outs[i_split], tensor);
  12748. }
  12749. // Set split info if needed
  12750. if (n_split > 1) {
  12751. for (size_t i = 0; i < ctx_outs.size(); ++i) {
  12752. gguf_set_val_u16(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
  12753. gguf_set_val_u16(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
  12754. gguf_set_val_i32(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
  12755. }
  12756. }
  12757. int cur_split = -1;
  12758. std::ofstream fout;
  12759. auto close_ofstream = [&]() {
  12760. // Write metadata and close file handler
  12761. if (fout.is_open()) {
  12762. fout.seekp(0);
  12763. std::vector<uint8_t> data(gguf_get_meta_size(ctx_outs[cur_split]));
  12764. gguf_get_meta_data(ctx_outs[cur_split], data.data());
  12765. fout.write((const char *) data.data(), data.size());
  12766. fout.close();
  12767. }
  12768. };
  12769. auto new_ofstream = [&](int index) {
  12770. cur_split = index;
12771. GGML_ASSERT(ctx_outs[cur_split] && "Found uninitialized gguf_context");
  12772. std::string fname = fname_out;
  12773. if (params->keep_split) {
  12774. char split_path[PATH_MAX] = {0};
  12775. llama_split_path(split_path, sizeof(split_path), fname_out.c_str(), cur_split, n_split);
  12776. fname = std::string(split_path);
  12777. }
  12778. fout = std::ofstream(fname, std::ios::binary);
  12779. fout.exceptions(std::ofstream::failbit); // fail fast on write errors
  12780. const size_t meta_size = gguf_get_meta_size(ctx_outs[cur_split]);
  12781. // placeholder for the meta data
  12782. ::zeros(fout, meta_size);
  12783. };
  12784. const auto tn = LLM_TN(model.arch);
  12785. new_ofstream(0);
  12786. for (int i = 0; i < ml.n_tensors; ++i) {
  12787. auto weight = ml.get_weight(i);
  12788. struct ggml_tensor * tensor = weight->tensor;
  12789. if (weight->idx != cur_split && params->keep_split) {
  12790. close_ofstream();
  12791. new_ofstream(weight->idx);
  12792. }
  12793. const std::string name = ggml_get_name(tensor);
  12794. if (!ml.use_mmap) {
  12795. if (read_data.size() < ggml_nbytes(tensor)) {
  12796. read_data.resize(ggml_nbytes(tensor));
  12797. }
  12798. tensor->data = read_data.data();
  12799. }
  12800. ml.load_data_for(tensor);
  12801. LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
  12802. ++idx, ml.n_tensors,
  12803. ggml_get_name(tensor),
  12804. llama_format_tensor_shape(tensor).c_str(),
  12805. ggml_type_name(tensor->type));
  12806. // This used to be a regex, but <regex> has an extreme cost to compile times.
  12807. bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
  12808. // quantize only 2D and 3D tensors (experts)
  12809. quantize &= (ggml_n_dims(tensor) >= 2);
  12810. // do not quantize norm tensors
  12811. quantize &= name.find("_norm.weight") == std::string::npos;
  12812. quantize &= params->quantize_output_tensor || name != "output.weight";
  12813. quantize &= !params->only_copy;
  12814. // do not quantize expert gating tensors
  12815. // NOTE: can't use LLM_TN here because the layer number is not known
  12816. quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
  12817. // do not quantize positional embeddings and token types (BERT)
  12818. quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
  12819. quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");
  12820. // do not quantize Mamba's small yet 2D weights
  12821. // NOTE: can't use LLM_TN here because the layer number is not known
  12822. quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
  12823. quantize &= name.find("ssm_x.weight") == std::string::npos;
  12824. quantize &= name.find("ssm_dt.weight") == std::string::npos;
  12825. enum ggml_type new_type;
  12826. void * new_data;
  12827. size_t new_size;
  12828. if (quantize) {
  12829. new_type = default_type;
  12830. // get more optimal quantization type based on the tensor shape, layer, etc.
  12831. if (!params->pure && ggml_is_quantized(default_type)) {
  12832. new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
  12833. }
  12834. if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
  12835. new_type = params->token_embedding_type;
  12836. }
  12837. if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
  12838. new_type = params->output_tensor_type;
  12839. }
  12840. // If we've decided to quantize to the same type the tensor is already
  12841. // in then there's nothing to do.
  12842. quantize = tensor->type != new_type;
  12843. }
  12844. if (!quantize) {
  12845. new_type = tensor->type;
  12846. new_data = tensor->data;
  12847. new_size = ggml_nbytes(tensor);
  12848. LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
  12849. } else {
  12850. const int64_t nelements = ggml_nelements(tensor);
  12851. const float * imatrix = nullptr;
  12852. if (imatrix_data) {
  12853. auto it = imatrix_data->find(tensor->name);
  12854. if (it == imatrix_data->end()) {
  12855. LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
  12856. } else {
  12857. if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
  12858. imatrix = it->second.data();
  12859. } else {
  12860. LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
  12861. int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);
  12862. // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
12863. // this is a significant error and it may be a good idea to abort the process if this happens,
  12864. // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
  12865. // tok_embd should be ignored in this case, since it always causes this warning
  12866. if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
  12867. throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
  12868. int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
  12869. }
  12870. }
  12871. }
  12872. }
  12873. if ((new_type == GGML_TYPE_IQ2_XXS ||
  12874. new_type == GGML_TYPE_IQ2_XS ||
  12875. new_type == GGML_TYPE_IQ2_S ||
  12876. new_type == GGML_TYPE_IQ1_S ||
  12877. (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight")) ||
  12878. (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
  12879. LLAMA_LOG_ERROR("\n\n============================================================\n");
  12880. LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
  12881. LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
  12882. LLAMA_LOG_ERROR("============================================================\n\n");
  12883. throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
  12884. }
  12885. float * f32_data;
  12886. if (tensor->type == GGML_TYPE_F32) {
  12887. f32_data = (float *) tensor->data;
  12888. } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
  12889. throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
  12890. } else {
  12891. llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
  12892. f32_data = (float *) f32_conv_buf.data();
  12893. }
  12894. LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
  12895. fflush(stdout);
  12896. if (work.size() < (size_t)nelements * 4) {
  12897. work.resize(nelements * 4); // upper bound on size
  12898. }
  12899. new_data = work.data();
  12900. const int64_t n_per_row = tensor->ne[0];
  12901. const int64_t nrows = tensor->ne[1];
  12902. static const int64_t min_chunk_size = 32 * 512;
  12903. const int64_t chunk_size = n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row);
  12904. const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
  12905. const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
  12906. const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1;
  12907. // quantize each expert separately since they have different importance matrices
  12908. new_size = 0;
  12909. for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) {
  12910. const float * f32_data_03 = f32_data + i03 * nelements_matrix;
  12911. void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
  12912. const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
  12913. new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
  12914. }
  12915. LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
  12916. }
  12917. total_size_org += ggml_nbytes(tensor);
  12918. total_size_new += new_size;
  12919. // update the gguf meta data as we go
  12920. gguf_set_tensor_type(ctx_outs[cur_split], name.c_str(), new_type);
  12921. gguf_set_tensor_data(ctx_outs[cur_split], name.c_str(), new_data, new_size);
  12922. // write tensor data + padding
  12923. fout.write((const char *) new_data, new_size);
  12924. zeros(fout, GGML_PAD(new_size, align) - new_size);
  12925. }
  12926. close_ofstream();
  12927. for (auto & c:ctx_outs) {
  12928. gguf_free(c);
  12929. }
  12930. LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
  12931. LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
  12932. if (qs.n_fallback > 0) {
  12933. LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",
  12934. __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
  12935. }
  12936. }
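//
// Usage sketch (illustrative; mirrors the quantize example tool): this routine is reached through
// the public llama_model_quantize entry point, with imatrix / kv_overrides threaded through the
// params struct when needed:
//
//   llama_model_quantize_params params = llama_model_quantize_default_params();
//   params.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M;
//   params.nthread = 8;
//   llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &params);
//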
  12937. static int llama_apply_lora_from_file_internal(
  12938. const struct llama_model & model, const char * path_lora, float scale, const char * path_base_model, int n_threads
  12939. ) {
  12940. LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
  12941. const int64_t t_start_lora_us = ggml_time_us();
  12942. llama_file fin(path_lora, "rb");
  12943. // verify magic and version
  12944. {
  12945. uint32_t magic = fin.read_u32();
  12946. if (magic != LLAMA_FILE_MAGIC_GGLA) {
  12947. LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
  12948. return 1;
  12949. }
  12950. uint32_t format_version = fin.read_u32();
  12951. if (format_version != 1) {
  12952. LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
  12953. return 1;
  12954. }
  12955. }
  12956. int32_t lora_r = fin.read_u32();
  12957. int32_t lora_alpha = fin.read_u32();
  12958. float scaling = scale * (float)lora_alpha / (float)lora_r;
  12959. LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
  12960. // load base model
  12961. std::unique_ptr<llama_model_loader> ml;
  12962. if (path_base_model) {
  12963. LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
  12964. ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*check_tensors*/ false, /*kv_overrides*/ nullptr));
  12965. ml->init_mappings(/*prefetch*/ false); // no prefetching
  12966. }
  12967. struct tensor_meta {
  12968. std::string name;
  12969. ggml_type type;
  12970. int32_t ne[2];
  12971. size_t offset;
  12972. };
  12973. std::map<std::string, tensor_meta> tensor_meta_map;
  12974. // load all tensor meta
  12975. while (true) {
  12976. if (fin.tell() == fin.size) {
  12977. // eof
  12978. break;
  12979. }
  12980. int32_t n_dims;
  12981. int32_t name_len;
  12982. int32_t ftype;
  12983. fin.read_raw(&n_dims, sizeof(n_dims));
  12984. fin.read_raw(&name_len, sizeof(name_len));
  12985. fin.read_raw(&ftype, sizeof(ftype));
  12986. if (n_dims != 1 && n_dims != 2) {
  12987. LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
  12988. return 1;
  12989. }
  12990. int32_t ne[2] = { 1, 1 };
  12991. for (int i = 0; i < n_dims; ++i) {
  12992. fin.read_raw(&ne[i], sizeof(ne[i]));
  12993. }
  12994. std::string name;
  12995. {
  12996. GGML_ASSERT(name_len < GGML_MAX_NAME);
  12997. char buf[GGML_MAX_NAME];
  12998. fin.read_raw(buf, name_len);
  12999. name = std::string(buf, name_len);
  13000. }
  13001. // check for lora suffix
  13002. std::string lora_suffix;
  13003. if (name.length() > 6) {
  13004. lora_suffix = name.substr(name.length() - 6);
  13005. }
  13006. if (lora_suffix != ".loraA" && lora_suffix != ".loraB") {
  13007. LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
  13008. return 1;
  13009. }
  13010. // tensor type
  13011. ggml_type wtype;
  13012. switch (ftype) {
  13013. case 0: wtype = GGML_TYPE_F32; break;
  13014. case 1: wtype = GGML_TYPE_F16; break;
  13015. default:
  13016. {
  13017. LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
  13018. __func__, ftype);
  13019. return 1;
  13020. }
  13021. }
  13022. // data offset
  13023. size_t offset = fin.tell();
  13024. offset = (offset + 31) & -32;
  13025. // skip tensor data
  13026. fin.seek(offset + ggml_row_size(wtype, ne[0]) * ne[1], SEEK_SET);
  13027. tensor_meta_map.emplace(name, tensor_meta{ name, wtype, { ne[0], ne[1] }, offset });
  13028. }
  13029. bool warned = false;
  13030. int n_tensors = 0;
  13031. // apply
  13032. ggml_backend_t backend_cpu = ggml_backend_cpu_init();
  13033. if (backend_cpu == nullptr) {
  13034. LLAMA_LOG_ERROR("%s: error: failed to initialize cpu backend\n", __func__);
  13035. return 1;
  13036. }
  13037. ggml_backend_cpu_set_n_threads(backend_cpu, n_threads);
  13038. std::vector<no_init<uint8_t>> read_buf;
  13039. for (const auto & it : model.tensors_by_name) {
  13040. const std::string & base_name = it.first;
  13041. ggml_tensor * model_t = it.second;
  13042. if (tensor_meta_map.find(base_name + ".loraA") == tensor_meta_map.end() ||
  13043. tensor_meta_map.find(base_name + ".loraB") == tensor_meta_map.end()) {
  13044. continue;
  13045. }
  13046. tensor_meta & metaA = tensor_meta_map.at(base_name + ".loraA");
  13047. tensor_meta & metaB = tensor_meta_map.at(base_name + ".loraB");
  13048. ggml_init_params lora_init_params = {
  13049. /* .mem_size */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
  13050. /* .mem_buffer */ nullptr,
  13051. /* .no_alloc */ true,
  13052. };
  13053. ggml_context * lora_ctx = ggml_init(lora_init_params);
  13054. if (lora_ctx == nullptr) {
  13055. LLAMA_LOG_ERROR("%s: error: failed to initialize lora context\n", __func__);
  13056. ggml_backend_free(backend_cpu);
  13057. return 1;
  13058. }
  13059. // create tensors
  13060. ggml_tensor * loraA = ggml_new_tensor_2d(lora_ctx, metaA.type, metaA.ne[0], metaA.ne[1]);
  13061. ggml_tensor * loraB = ggml_new_tensor_2d(lora_ctx, metaB.type, metaB.ne[0], metaB.ne[1]);
  13062. ggml_set_name(loraA, metaA.name.c_str());
  13063. ggml_set_name(loraB, metaB.name.c_str());
  13064. ggml_tensor * base_t;
  13065. if (ml) {
  13066. if (!ml->get_tensor_meta(base_name.c_str())) {
  13067. LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
  13068. return 1;
  13069. }
  13070. base_t = ggml_dup_tensor(lora_ctx, ml->get_tensor_meta(base_name.c_str()));
  13071. } else {
  13072. base_t = ggml_dup_tensor(lora_ctx, model_t);
  13073. }
  13074. ggml_set_name(base_t, base_name.c_str());
  13075. // allocate in backend buffer
  13076. ggml_backend_buffer_t lora_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type());
  13077. if (lora_buf == nullptr) {
  13078. LLAMA_LOG_ERROR("%s: error: failed to allocate lora tensors\n", __func__);
  13079. return 1;
  13080. }
  13081. // load tensor data
  13082. auto load_tensor = [&read_buf, &fin](const tensor_meta & tensor_meta, ggml_tensor * tensor) {
  13083. read_buf.resize(ggml_nbytes(tensor));
  13084. fin.seek(tensor_meta.offset, SEEK_SET);
  13085. fin.read_raw(read_buf.data(), ggml_nbytes(tensor));
  13086. ggml_backend_tensor_set(tensor, read_buf.data(), 0, read_buf.size());
  13087. };
  13088. load_tensor(metaA, loraA);
  13089. load_tensor(metaB, loraB);
  13090. // load base model tensor data
  13091. if (ml) {
  13092. ml->load_data_for(base_t);
  13093. } else {
  13094. ggml_backend_tensor_copy(model_t, base_t);
  13095. }
  13096. if (ggml_is_quantized(base_t->type) && !warned) {
  13097. LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
  13098. "use a f16 or f32 base model with --lora-base\n", __func__);
  13099. warned = true;
  13100. }
  13101. if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
  13102. LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
  13103. " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
  13104. ggml_free(lora_ctx);
  13105. ggml_backend_buffer_free(lora_buf);
  13106. ggml_backend_free(backend_cpu);
  13107. return 1;
  13108. }
  13109. auto build_lora_graph = [&]() {
  13110. // w = w + BA*s
  13111. ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
  13112. ggml_set_name(BA, "BA");
  13113. if (scaling != 1.0f) {
  13114. BA = ggml_scale(lora_ctx, BA, scaling);
  13115. ggml_set_name(BA, "BA_scaled");
  13116. }
  13117. ggml_tensor * r;
  13118. r = ggml_add_inplace(lora_ctx, base_t, BA);
  13119. ggml_set_name(r, "r_add");
  13120. if (base_t->type != model_t->type) {
  13121. // convert the result to the model type
  13122. r = ggml_cast(lora_ctx, r, model_t->type);
  13123. ggml_set_name(r, "r_cast");
  13124. }
  13125. return r;
  13126. };
  13127. ggml_cgraph * gf = ggml_new_graph(lora_ctx);
  13128. ggml_tensor * r = build_lora_graph();
  13129. ggml_build_forward_expand(gf, r);
  13130. ggml_backend_buffer_t graph_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type());
  13131. if (graph_buf == nullptr) {
  13132. LLAMA_LOG_ERROR("%s: error: failed to allocate graph tensors\n", __func__);
  13133. ggml_free(lora_ctx);
  13134. ggml_backend_buffer_free(lora_buf);
  13135. ggml_backend_free(backend_cpu);
  13136. return 1;
  13137. }
  13138. ggml_backend_graph_compute(backend_cpu, gf);
  13139. ggml_backend_tensor_set(model_t, r->data, 0, ggml_nbytes(r));
  13140. #if 0
  13141. // TODO: use scheduler with fallback to CPU for less copies between CPU and GPU
  13142. //ggml_backend_sched_t sched = ggml_backend_sched_new(backends.data(), backends.size(), GGML_DEFAULT_GRAPH_SIZE);
  13143. // sched compute
13144. ggml_build_forward_expand(gf, build_lora_graph());
13145. ggml_backend_sched_init_measure(sched, gf);
13146. // create the graph again, since the previous one was destroyed by the measure
13147. ggml_graph_clear(gf);
13148. ggml_build_forward_expand(gf, build_lora_graph());
  13149. ggml_backend_sched_graph_compute(sched, gf);
  13150. ggml_backend_sched_free(sched);
  13151. #endif
  13152. ggml_backend_buffer_free(lora_buf);
  13153. ggml_backend_buffer_free(graph_buf);
  13154. ggml_free(lora_ctx);
  13155. n_tensors++;
  13156. if (n_tensors % 4 == 0) {
  13157. LLAMA_LOG_INFO(".");
  13158. }
  13159. }
  13160. ggml_backend_free(backend_cpu);
  13161. const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
  13162. LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
  13163. return 0;
  13164. }
  13165. //
  13166. // interface implementation
  13167. //
  13168. struct llama_model_params llama_model_default_params() {
  13169. struct llama_model_params result = {
  13170. /*.n_gpu_layers =*/ 0,
  13171. /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER,
  13172. /*.main_gpu =*/ 0,
  13173. /*.tensor_split =*/ nullptr,
  13174. /*.rpc_servers =*/ nullptr,
  13175. /*.progress_callback =*/ nullptr,
  13176. /*.progress_callback_user_data =*/ nullptr,
  13177. /*.kv_overrides =*/ nullptr,
  13178. /*.vocab_only =*/ false,
  13179. /*.use_mmap =*/ true,
  13180. /*.use_mlock =*/ false,
  13181. /*.check_tensors =*/ false,
  13182. };
  13183. #ifdef GGML_USE_METAL
  13184. // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
  13185. result.n_gpu_layers = 999;
  13186. #endif
  13187. return result;
  13188. }
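/** example (illustrative sketch, not part of the API in this file): start from the defaults
 * above and override only what the caller needs; the path and layer count below are
 * hypothetical placeholders.
 *
 *   llama_model_params mparams = llama_model_default_params();
 *   mparams.n_gpu_layers = 32;   // offload 32 layers if a GPU backend was compiled in
 *   llama_model * model = llama_load_model_from_file("model.gguf", mparams);
 */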
  13189. struct llama_context_params llama_context_default_params() {
  13190. struct llama_context_params result = {
  13191. /*.seed =*/ LLAMA_DEFAULT_SEED,
  13192. /*.n_ctx =*/ 512,
  13193. /*.n_batch =*/ 2048,
  13194. /*.n_ubatch =*/ 512,
  13195. /*.n_seq_max =*/ 1,
  13196. /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
  13197. /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
  13198. /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
  13199. /*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
  13200. /*.rope_freq_base =*/ 0.0f,
  13201. /*.rope_freq_scale =*/ 0.0f,
  13202. /*.yarn_ext_factor =*/ -1.0f,
  13203. /*.yarn_attn_factor =*/ 1.0f,
  13204. /*.yarn_beta_fast =*/ 32.0f,
  13205. /*.yarn_beta_slow =*/ 1.0f,
  13206. /*.yarn_orig_ctx =*/ 0,
  13207. /*.defrag_thold =*/ -1.0f,
  13208. /*.cb_eval =*/ nullptr,
  13209. /*.cb_eval_user_data =*/ nullptr,
  13210. /*.type_k =*/ GGML_TYPE_F16,
  13211. /*.type_v =*/ GGML_TYPE_F16,
  13212. /*.logits_all =*/ false,
  13213. /*.embeddings =*/ false,
  13214. /*.offload_kqv =*/ true,
  13215. /*.flash_attn =*/ false,
  13216. /*.abort_callback =*/ nullptr,
  13217. /*.abort_callback_data =*/ nullptr,
  13218. };
  13219. return result;
  13220. }
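/** example (illustrative sketch): the defaults above are conservative; a typical caller bumps
 * the context size and thread counts before creating a context. the values are hypothetical
 * and depend on the model and hardware; n_ctx = 0 falls back to the model's training context.
 *
 *   llama_context_params cparams = llama_context_default_params();
 *   cparams.n_ctx           = 4096;
 *   cparams.n_threads       = 8;
 *   cparams.n_threads_batch = 8;
 *   llama_context * lctx = llama_new_context_with_model(model, cparams);
 */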
  13221. struct llama_model_quantize_params llama_model_quantize_default_params() {
  13222. struct llama_model_quantize_params result = {
  13223. /*.nthread =*/ 0,
  13224. /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
  13225. /*.output_tensor_type =*/ GGML_TYPE_COUNT,
  13226. /*.token_embedding_type =*/ GGML_TYPE_COUNT,
  13227. /*.allow_requantize =*/ false,
  13228. /*.quantize_output_tensor =*/ true,
  13229. /*.only_copy =*/ false,
  13230. /*.pure =*/ false,
  13231. /*.keep_split =*/ false,
  13232. /*.imatrix =*/ nullptr,
  13233. /*.kv_overrides =*/ nullptr,
  13234. };
  13235. return result;
  13236. }
  13237. size_t llama_max_devices(void) {
  13238. #if defined(GGML_USE_RPC)
  13239. return GGML_RPC_MAX_SERVERS;
  13240. #elif defined(GGML_USE_METAL)
  13241. return 1;
  13242. #elif defined(GGML_USE_CUDA)
  13243. return GGML_CUDA_MAX_DEVICES;
  13244. #elif defined(GGML_USE_SYCL)
  13245. return GGML_SYCL_MAX_DEVICES;
  13246. #elif defined(GGML_USE_VULKAN)
  13247. return GGML_VK_MAX_DEVICES;
  13248. #else
  13249. return 1;
  13250. #endif
  13251. }
  13252. bool llama_supports_mmap(void) {
  13253. return llama_mmap::SUPPORTED;
  13254. }
  13255. bool llama_supports_mlock(void) {
  13256. return llama_mlock::SUPPORTED;
  13257. }
  13258. bool llama_supports_gpu_offload(void) {
  13259. #if defined(GGML_USE_CUDA) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
  13260. defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC)
  13261. // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
  13262. return true;
  13263. #else
  13264. return false;
  13265. #endif
  13266. }
  13267. void llama_backend_init(void) {
  13268. ggml_time_init();
  13269. // needed to initialize f16 tables
  13270. {
  13271. struct ggml_init_params params = { 0, NULL, false };
  13272. struct ggml_context * ctx = ggml_init(params);
  13273. ggml_free(ctx);
  13274. }
  13275. }
  13276. void llama_numa_init(enum ggml_numa_strategy numa) {
  13277. if (numa != GGML_NUMA_STRATEGY_DISABLED) {
  13278. ggml_numa_init(numa);
  13279. }
  13280. }
  13281. void llama_backend_free(void) {
  13282. ggml_quantize_free();
  13283. }
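/** example (illustrative sketch): the expected init/teardown order when using this API;
 * teardown mirrors init. the model path and parameter values are placeholders.
 *
 *   llama_backend_init();
 *   llama_numa_init(GGML_NUMA_STRATEGY_DISABLED);   // optional, no-op when disabled
 *   llama_model   * model = llama_load_model_from_file("model.gguf", llama_model_default_params());
 *   llama_context * lctx  = llama_new_context_with_model(model, llama_context_default_params());
 *   // ... run inference ...
 *   llama_free(lctx);
 *   llama_free_model(model);
 *   llama_backend_free();
 */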
  13284. int64_t llama_time_us(void) {
  13285. return ggml_time_us();
  13286. }
  13287. struct llama_model * llama_load_model_from_file(
  13288. const char * path_model,
  13289. struct llama_model_params params) {
  13290. ggml_time_init();
  13291. llama_model * model = new llama_model;
  13292. unsigned cur_percentage = 0;
  13293. if (params.progress_callback == NULL) {
  13294. params.progress_callback_user_data = &cur_percentage;
  13295. params.progress_callback = [](float progress, void * ctx) {
  13296. unsigned * cur_percentage_p = (unsigned *) ctx;
  13297. unsigned percentage = (unsigned) (100 * progress);
  13298. while (percentage > *cur_percentage_p) {
  13299. *cur_percentage_p = percentage;
  13300. LLAMA_LOG_INFO(".");
  13301. if (percentage >= 100) {
  13302. LLAMA_LOG_INFO("\n");
  13303. }
  13304. }
  13305. return true;
  13306. };
  13307. }
  13308. if (params.rpc_servers != nullptr && params.rpc_servers[0] != '\0') {
13309. // split the comma-separated list of servers and add each one to model->rpc_servers
  13310. std::string servers(params.rpc_servers);
  13311. size_t pos = 0;
  13312. while ((pos = servers.find(",")) != std::string::npos) {
  13313. std::string server = servers.substr(0, pos);
  13314. model->rpc_servers.push_back(server);
  13315. servers.erase(0, pos + 1);
  13316. }
  13317. model->rpc_servers.push_back(servers);
  13318. }
  13319. int status = llama_model_load(path_model, *model, params);
  13320. GGML_ASSERT(status <= 0);
  13321. if (status < 0) {
  13322. if (status == -1) {
  13323. LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
  13324. } else if (status == -2) {
  13325. LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
  13326. }
  13327. delete model;
  13328. return nullptr;
  13329. }
  13330. return model;
  13331. }
  13332. void llama_free_model(struct llama_model * model) {
  13333. delete model;
  13334. }
  13335. struct llama_context * llama_new_context_with_model(
  13336. struct llama_model * model,
  13337. struct llama_context_params params) {
  13338. if (!model) {
  13339. LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__);
  13340. return nullptr;
  13341. }
  13342. if (params.n_batch == 0 && params.n_ubatch == 0) {
  13343. LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__);
  13344. return nullptr;
  13345. }
  13346. if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) {
  13347. LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__);
  13348. return nullptr;
  13349. }
  13350. if (params.flash_attn && model->arch == LLM_ARCH_GROK) {
  13351. LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
  13352. params.flash_attn = false;
  13353. }
  13354. if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
  13355. LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
  13356. return nullptr;
  13357. }
  13358. llama_context * ctx = new llama_context(*model);
  13359. const auto & hparams = model->hparams;
  13360. auto & cparams = ctx->cparams;
  13361. cparams.n_seq_max = std::max(1u, params.n_seq_max);
  13362. cparams.n_threads = params.n_threads;
  13363. cparams.n_threads_batch = params.n_threads_batch;
  13364. cparams.yarn_ext_factor = params.yarn_ext_factor;
  13365. cparams.yarn_attn_factor = params.yarn_attn_factor;
  13366. cparams.yarn_beta_fast = params.yarn_beta_fast;
  13367. cparams.yarn_beta_slow = params.yarn_beta_slow;
  13368. cparams.defrag_thold = params.defrag_thold;
  13369. cparams.embeddings = params.embeddings;
  13370. cparams.offload_kqv = params.offload_kqv;
  13371. cparams.flash_attn = params.flash_attn;
  13372. cparams.pooling_type = params.pooling_type;
  13373. cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
  13374. cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
  13375. cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
  13376. // this is necessary due to kv_self.n being padded later during inference
  13377. cparams.n_ctx = GGML_PAD(cparams.n_ctx, llama_kv_cache_get_padding(cparams));
  13378. // with causal attention, the batch size is limited by the context size
  13379. cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
  13380. // the batch has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask
  13381. // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext)
  13382. // ref: https://github.com/ggerganov/llama.cpp/pull/5021
  13383. if (cparams.n_batch < GGML_KQ_MASK_PAD) {
  13384. LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD);
  13385. cparams.n_batch = GGML_KQ_MASK_PAD;
  13386. }
  13387. cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
  13388. cparams.n_ctx_orig_yarn = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
  13389. hparams.n_ctx_orig_yarn != 0 ? hparams.n_ctx_orig_yarn :
  13390. hparams.n_ctx_train;
  13391. cparams.cb_eval = params.cb_eval;
  13392. cparams.cb_eval_user_data = params.cb_eval_user_data;
  13393. auto rope_scaling_type = params.rope_scaling_type;
  13394. if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) {
  13395. rope_scaling_type = hparams.rope_scaling_type_train;
  13396. }
  13397. if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) {
  13398. cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
  13399. }
  13400. if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
  13401. cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f;
  13402. }
  13403. cparams.yarn_attn_factor *= hparams.rope_attn_factor;
  13404. cparams.causal_attn = hparams.causal_attn;
  13405. if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
  13406. if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
  13407. cparams.pooling_type = LLAMA_POOLING_TYPE_NONE;
  13408. } else {
  13409. cparams.pooling_type = hparams.pooling_type;
  13410. }
  13411. }
  13412. if (params.seed == LLAMA_DEFAULT_SEED) {
  13413. params.seed = time(NULL);
  13414. }
  13415. LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
  13416. LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch);
  13417. LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch);
  13418. LLAMA_LOG_INFO("%s: flash_attn = %d\n", __func__, cparams.flash_attn);
  13419. LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
  13420. LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
  13421. ctx->abort_callback = params.abort_callback;
  13422. ctx->abort_callback_data = params.abort_callback_data;
  13423. ctx->rng = std::mt19937(params.seed);
  13424. ctx->logits_all = params.logits_all;
  13425. uint32_t kv_size = cparams.n_ctx;
  13426. ggml_type type_k = params.type_k;
  13427. ggml_type type_v = params.type_v;
  13428. // Mamba only needs a constant number of KV cache cells per sequence
  13429. if (model->arch == LLM_ARCH_MAMBA) {
  13430. // Mamba needs at least as many KV cells as there are sequences kept at any time
  13431. kv_size = std::max((uint32_t) 1, params.n_seq_max);
  13432. // it's probably best to keep as much precision as possible for the states
  13433. type_k = GGML_TYPE_F32; // required by ggml_ssm_conv for Mamba's conv_states
  13434. type_v = GGML_TYPE_F32; // required by ggml_ssm_scan for Mamba's ssm_states
  13435. }
  13436. GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0);
  13437. GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0);
  13438. if (!hparams.vocab_only) {
  13439. // initialize backends
  13440. #if defined(GGML_USE_METAL)
  13441. if (model->n_gpu_layers > 0) {
  13442. ctx->backend_metal = ggml_backend_metal_init();
  13443. if (ctx->backend_metal == nullptr) {
  13444. LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
  13445. llama_free(ctx);
  13446. return nullptr;
  13447. }
  13448. ctx->backends.push_back(ctx->backend_metal);
  13449. }
  13450. #elif defined(GGML_USE_CUDA)
  13451. if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
  13452. // with split_mode LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_ROW, only the main GPU backend is used
  13453. ggml_backend_t backend = ggml_backend_cuda_init(model->main_gpu);
  13454. if (backend == nullptr) {
  13455. LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, model->main_gpu);
  13456. llama_free(ctx);
  13457. return nullptr;
  13458. }
  13459. ctx->backends.push_back(backend);
  13460. } else {
  13461. // LLAMA_SPLIT_MODE_LAYER requires a backend for each GPU
  13462. for (int device = 0; device < ggml_backend_cuda_get_device_count(); ++device) {
  13463. ggml_backend_t backend = ggml_backend_cuda_init(device);
  13464. if (backend == nullptr) {
  13465. LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, device);
  13466. llama_free(ctx);
  13467. return nullptr;
  13468. }
  13469. ctx->backends.push_back(backend);
  13470. }
  13471. }
  13472. #elif defined(GGML_USE_VULKAN)
  13473. if (model->split_mode == LLAMA_SPLIT_MODE_ROW) {
  13474. LLAMA_LOG_ERROR("%s: Row split not supported. Failed to initialize Vulkan backend\n", __func__);
  13475. llama_free(ctx);
  13476. return nullptr;
  13477. }
  13478. if (model->split_mode == LLAMA_SPLIT_MODE_NONE) {
  13479. ggml_backend_t backend = ggml_backend_vk_init(model->main_gpu);
  13480. if (backend == nullptr) {
  13481. LLAMA_LOG_ERROR("%s: failed to initialize Vulkan backend\n", __func__);
  13482. llama_free(ctx);
  13483. return nullptr;
  13484. }
  13485. ctx->backends.push_back(backend);
  13486. } else {
  13487. for (int device = 0; device < ggml_backend_vk_get_device_count(); ++device) {
  13488. ggml_backend_t backend = ggml_backend_vk_init(device);
  13489. if (backend == nullptr) {
  13490. LLAMA_LOG_ERROR("%s: failed to initialize Vulkan%d backend\n", __func__, device);
  13491. llama_free(ctx);
  13492. return nullptr;
  13493. }
  13494. ctx->backends.push_back(backend);
  13495. }
  13496. }
  13497. #elif defined(GGML_USE_SYCL)
  13498. // with split_mode LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_ROW, only the main GPU backend is used
  13499. if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
  13500. ggml_backend_t backend = ggml_backend_sycl_init(model->main_gpu);
  13501. if (backend == nullptr) {
  13502. int main_gpu_id = ggml_backend_sycl_get_device_id(model->main_gpu);
  13503. LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d (index %d) backend\n", __func__, main_gpu_id, model->main_gpu);
  13504. llama_free(ctx);
  13505. return nullptr;
  13506. }
  13507. ctx->backends.push_back(backend);
  13508. } else {
13509. // LLAMA_SPLIT_MODE_LAYER requires a backend for each GPU
  13510. for (int i = 0; i < ggml_backend_sycl_get_device_count(); ++i) {
  13511. ggml_backend_t backend = ggml_backend_sycl_init(i);
  13512. if (backend == nullptr) {
  13513. int id_list[GGML_SYCL_MAX_DEVICES];
  13514. ggml_sycl_get_gpu_list(id_list, GGML_SYCL_MAX_DEVICES);
  13515. LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d (index %d) backend\n", __func__, id_list[i], i);
  13516. llama_free(ctx);
  13517. return nullptr;
  13518. }
  13519. ctx->backends.push_back(backend);
  13520. }
  13521. }
  13522. #elif defined(GGML_USE_KOMPUTE)
  13523. if (model->n_gpu_layers > 0) {
  13524. auto * backend = ggml_backend_kompute_init(model->main_gpu);
  13525. if (backend == nullptr) {
  13526. LLAMA_LOG_ERROR("%s: failed to initialize Kompute backend\n", __func__);
  13527. llama_free(ctx);
  13528. return nullptr;
  13529. }
  13530. ctx->backends.push_back(backend);
  13531. }
  13532. #endif
  13533. #if defined(GGML_USE_RPC)
  13534. if (model->n_gpu_layers > 0) {
  13535. for (const auto & endpoint : model->rpc_servers) {
  13536. ggml_backend_t backend = ggml_backend_rpc_init(endpoint.c_str());
  13537. if (backend == nullptr) {
  13538. LLAMA_LOG_ERROR("%s: failed to initialize RPC to '%s'\n", __func__, endpoint.c_str());
  13539. llama_free(ctx);
  13540. return nullptr;
  13541. }
  13542. ctx->backends.push_back(backend);
  13543. }
  13544. }
  13545. #endif
  13546. ctx->backend_cpu = ggml_backend_cpu_init();
  13547. if (ctx->backend_cpu == nullptr) {
  13548. LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__);
  13549. llama_free(ctx);
  13550. return nullptr;
  13551. }
  13552. ctx->backends.push_back(ctx->backend_cpu);
  13553. if (!llama_kv_cache_init(ctx->kv_self, ctx, type_k, type_v, kv_size, cparams.offload_kqv)) {
  13554. LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
  13555. llama_free(ctx);
  13556. return nullptr;
  13557. }
  13558. {
  13559. size_t memory_size_k = 0;
  13560. size_t memory_size_v = 0;
  13561. for (auto & k : ctx->kv_self.k_l) {
  13562. memory_size_k += ggml_nbytes(k);
  13563. }
  13564. for (auto & v : ctx->kv_self.v_l) {
  13565. memory_size_v += ggml_nbytes(v);
  13566. }
  13567. LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
  13568. (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
  13569. ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
  13570. ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
  13571. }
  13572. // graph outputs buffer
  13573. {
  13574. // resized during inference when a batch uses more outputs
  13575. if (llama_output_reserve(*ctx, params.n_seq_max) < params.n_seq_max) {
  13576. LLAMA_LOG_ERROR("%s: failed to reserve initial output buffer\n", __func__);
  13577. llama_free(ctx);
  13578. return nullptr;
  13579. }
  13580. LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__,
  13581. ggml_backend_buffer_name(ctx->buf_output),
  13582. ggml_backend_buffer_get_size(ctx->buf_output) / 1024.0 / 1024.0);
  13583. }
  13584. // scheduler and compute buffers
  13585. {
  13586. // buffer types used for the compute buffer of each backend
  13587. std::vector<ggml_backend_buffer_type_t> backend_buft;
  13588. for (auto * backend : ctx->backends) {
  13589. if (ggml_backend_is_cpu(backend)) {
  13590. // use host buffers for the CPU backend compute buffer
  13591. backend_buft.push_back(llama_default_buffer_type_cpu(true));
  13592. } else {
  13593. backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
  13594. }
  13595. }
  13596. // buffer used to store the computation graph and the tensor meta data
  13597. ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead_custom(LLAMA_MAX_NODES, false));
  13598. // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
  13599. bool pipeline_parallel =
  13600. llama_get_device_count(*model) > 1 &&
  13601. model->n_gpu_layers > (int)model->hparams.n_layer &&
  13602. model->split_mode == LLAMA_SPLIT_MODE_LAYER &&
  13603. params.offload_kqv;
  13604. #ifndef GGML_USE_CUDA
  13605. // pipeline parallelism requires support for async compute and events
  13606. // currently this is only implemented in the CUDA backend
  13607. pipeline_parallel = false;
  13608. #endif
  13609. ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), LLAMA_MAX_NODES, pipeline_parallel);
  13610. if (pipeline_parallel) {
  13611. LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(ctx->sched));
  13612. }
  13613. // build worst-case graph
  13614. int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_ubatch);
  13615. int n_past = cparams.n_ctx - n_tokens;
  13616. llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
  13617. ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);
  13618. // initialize scheduler with the worst-case graph
  13619. if (!ggml_backend_sched_reserve(ctx->sched, gf)) {
  13620. LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
  13621. llama_free(ctx);
  13622. return nullptr;
  13623. }
  13624. for (size_t i = 0; i < ctx->backends.size(); i++) {
  13625. ggml_backend_t backend = ctx->backends[i];
  13626. ggml_backend_buffer_type_t buft = backend_buft[i];
  13627. size_t size = ggml_backend_sched_get_buffer_size(ctx->sched, backend);
  13628. if (size > 1) {
  13629. LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
  13630. ggml_backend_buft_name(buft),
  13631. size / 1024.0 / 1024.0);
  13632. }
  13633. }
  13634. // note: the number of splits during measure is higher than during inference due to the kv shift
  13635. int n_splits = ggml_backend_sched_get_n_splits(ctx->sched);
  13636. LLAMA_LOG_INFO("%s: graph nodes = %d\n", __func__, gf->n_nodes);
  13637. LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits);
  13638. }
  13639. }
  13640. return ctx;
  13641. }
  13642. void llama_free(struct llama_context * ctx) {
  13643. delete ctx;
  13644. }
  13645. const llama_model * llama_get_model(const struct llama_context * ctx) {
  13646. return &ctx->model;
  13647. }
  13648. uint32_t llama_n_ctx(const struct llama_context * ctx) {
  13649. return ctx->cparams.n_ctx;
  13650. }
  13651. uint32_t llama_n_batch(const struct llama_context * ctx) {
  13652. return ctx->cparams.n_batch;
  13653. }
  13654. uint32_t llama_n_ubatch(const struct llama_context * ctx) {
  13655. return ctx->cparams.n_ubatch;
  13656. }
  13657. uint32_t llama_n_seq_max(const struct llama_context * ctx) {
  13658. return ctx->kv_self.size;
  13659. }
  13660. enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
  13661. return model->vocab.type;
  13662. }
  13663. enum llama_rope_type llama_rope_type(const struct llama_model * model) {
  13664. switch (model->arch) {
  13665. // these models do not use RoPE
  13666. case LLM_ARCH_GPT2:
  13667. case LLM_ARCH_GPTJ:
  13668. case LLM_ARCH_MPT:
  13669. case LLM_ARCH_REFACT:
  13670. case LLM_ARCH_BLOOM:
  13671. case LLM_ARCH_MAMBA:
  13672. case LLM_ARCH_JINA_BERT_V2:
  13673. return LLAMA_ROPE_TYPE_NONE;
  13674. // use what we call a normal RoPE, operating on pairs of consecutive head values
  13675. case LLM_ARCH_LLAMA:
  13676. case LLM_ARCH_BAICHUAN:
  13677. case LLM_ARCH_STARCODER:
  13678. case LLM_ARCH_PLAMO:
  13679. case LLM_ARCH_CODESHELL:
  13680. case LLM_ARCH_ORION:
  13681. case LLM_ARCH_INTERNLM2:
  13682. case LLM_ARCH_MINICPM:
  13683. case LLM_ARCH_XVERSE:
  13684. case LLM_ARCH_COMMAND_R:
  13685. case LLM_ARCH_OLMO:
  13686. case LLM_ARCH_ARCTIC:
  13687. case LLM_ARCH_DEEPSEEK2:
  13688. return LLAMA_ROPE_TYPE_NORM;
  13689. // the pairs of head values are offset by n_rot/2
  13690. case LLM_ARCH_FALCON:
  13691. case LLM_ARCH_GROK:
  13692. case LLM_ARCH_DBRX:
  13693. case LLM_ARCH_BERT:
  13694. case LLM_ARCH_NOMIC_BERT:
  13695. case LLM_ARCH_STABLELM:
  13696. case LLM_ARCH_QWEN:
  13697. case LLM_ARCH_QWEN2:
  13698. case LLM_ARCH_QWEN2MOE:
  13699. case LLM_ARCH_PHI2:
  13700. case LLM_ARCH_PHI3:
  13701. case LLM_ARCH_GEMMA:
  13702. case LLM_ARCH_STARCODER2:
  13703. case LLM_ARCH_GPTNEOX:
  13704. return LLAMA_ROPE_TYPE_NEOX;
  13705. // all model arches should be listed explicitly here
  13706. case LLM_ARCH_UNKNOWN:
  13707. GGML_ASSERT(false && "unknown architecture");
  13708. break;
  13709. }
  13710. return LLAMA_ROPE_TYPE_NONE;
  13711. }
  13712. enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) {
  13713. return ctx->cparams.pooling_type;
  13714. }
  13715. int32_t llama_n_vocab(const struct llama_model * model) {
  13716. return model->hparams.n_vocab;
  13717. }
  13718. int32_t llama_n_ctx_train(const struct llama_model * model) {
  13719. return model->hparams.n_ctx_train;
  13720. }
  13721. int32_t llama_n_embd(const struct llama_model * model) {
  13722. return model->hparams.n_embd;
  13723. }
  13724. int32_t llama_n_layer(const struct llama_model * model) {
  13725. return model->hparams.n_layer;
  13726. }
  13727. float llama_rope_freq_scale_train(const struct llama_model * model) {
  13728. return model->hparams.rope_freq_scale_train;
  13729. }
  13730. int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
  13731. const auto & it = model->gguf_kv.find(key);
  13732. if (it == model->gguf_kv.end()) {
  13733. if (buf_size > 0) {
  13734. buf[0] = '\0';
  13735. }
  13736. return -1;
  13737. }
  13738. return snprintf(buf, buf_size, "%s", it->second.c_str());
  13739. }
  13740. int32_t llama_model_meta_count(const struct llama_model * model) {
  13741. return (int)model->gguf_kv.size();
  13742. }
  13743. int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
  13744. if (i < 0 || i >= (int)model->gguf_kv.size()) {
  13745. if (buf_size > 0) {
  13746. buf[0] = '\0';
  13747. }
  13748. return -1;
  13749. }
  13750. auto it = model->gguf_kv.begin();
  13751. std::advance(it, i);
  13752. return snprintf(buf, buf_size, "%s", it->first.c_str());
  13753. }
  13754. int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
  13755. if (i < 0 || i >= (int)model->gguf_kv.size()) {
  13756. if (buf_size > 0) {
  13757. buf[0] = '\0';
  13758. }
  13759. return -1;
  13760. }
  13761. auto it = model->gguf_kv.begin();
  13762. std::advance(it, i);
  13763. return snprintf(buf, buf_size, "%s", it->second.c_str());
  13764. }
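/** example (illustrative sketch): enumerating the GGUF metadata exposed by the three
 * functions above; the buffer sizes are arbitrary.
 *
 *   char key[128], val[256];
 *   for (int32_t i = 0; i < llama_model_meta_count(model); ++i) {
 *       llama_model_meta_key_by_index(model, i, key, sizeof(key));
 *       llama_model_meta_val_str_by_index(model, i, val, sizeof(val));
 *       printf("%s = %s\n", key, val);
 *   }
 */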
  13765. int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
  13766. return snprintf(buf, buf_size, "%s %s %s",
  13767. llama_model_arch_name(model->arch),
  13768. llama_model_type_name(model->type),
  13769. llama_model_ftype_name(model->ftype).c_str());
  13770. }
  13771. uint64_t llama_model_size(const struct llama_model * model) {
  13772. uint64_t size = 0;
  13773. for (const auto & it : model->tensors_by_name) {
  13774. size += ggml_nbytes(it.second);
  13775. }
  13776. return size;
  13777. }
  13778. uint64_t llama_model_n_params(const struct llama_model * model) {
  13779. uint64_t nparams = 0;
  13780. for (const auto & it : model->tensors_by_name) {
  13781. nparams += ggml_nelements(it.second);
  13782. }
  13783. return nparams;
  13784. }
  13785. struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
  13786. auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(),
  13787. [name](const std::pair<std::string, struct ggml_tensor *> & it) {
  13788. return it.first == name;
  13789. });
  13790. if (it == model->tensors_by_name.end()) {
  13791. return nullptr;
  13792. }
  13793. return it->second;
  13794. }
  13795. uint32_t llama_model_quantize(
  13796. const char * fname_inp,
  13797. const char * fname_out,
  13798. const llama_model_quantize_params * params) {
  13799. try {
  13800. llama_model_quantize_internal(fname_inp, fname_out, params);
  13801. return 0;
  13802. } catch (const std::exception & err) {
  13803. LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
  13804. return 1;
  13805. }
  13806. }
  13807. int32_t llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, float scale, const char * path_base_model, int32_t n_threads) {
  13808. try {
  13809. return llama_apply_lora_from_file_internal(*model, path_lora, scale, path_base_model, n_threads);
  13810. } catch (const std::exception & err) {
  13811. LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
  13812. return 1;
  13813. }
  13814. }
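/** example (illustrative sketch): merging a LoRA adapter into an already loaded model.
 * scale is usually 1.0f; pass an f16/f32 base model path when the loaded model is quantized
 * (see the warning in llama_apply_lora_from_file_internal). the paths are placeholders.
 *
 *   if (llama_model_apply_lora_from_file(model, "adapter.bin", 1.0f, NULL, 4) != 0) {
 *       fprintf(stderr, "failed to apply lora adapter\n");
 *   }
 */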
  13815. static bool llama_control_vector_init(struct llama_control_vector & cvec, const llama_model & model) {
  13816. GGML_ASSERT(cvec.tensors.empty());
  13817. GGML_ASSERT(cvec.ctxs.empty());
  13818. GGML_ASSERT(cvec.bufs.empty());
  13819. // count layer buffer types
  13820. std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
  13821. for (int64_t i = 0; i < model.hparams.n_layer; i++) {
  13822. buft_layer_count[model.buft_layer[i].buft]++;
  13823. }
  13824. // allocate contexts
  13825. std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
  13826. for (auto & it : buft_layer_count) {
  13827. int n_layers = it.second;
  13828. struct ggml_init_params params = {
  13829. /*.mem_size =*/ n_layers * ggml_tensor_overhead(),
  13830. /*.mem_buffer =*/ NULL,
  13831. /*.no_alloc =*/ true,
  13832. };
  13833. ggml_context * ctx = ggml_init(params);
  13834. if (!ctx) {
  13835. LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
13836. return false;
  13837. }
  13838. ctx_map[it.first] = ctx;
  13839. }
  13840. // make tensors
  13841. cvec.tensors.reserve(model.hparams.n_layer);
  13842. cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0
  13843. for (size_t il = 1; il < model.hparams.n_layer; il++) {
  13844. struct ggml_context * ctx = ctx_map.at(model.buft_layer[il].buft);
  13845. ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
  13846. cvec.tensors.push_back(tensor);
  13847. }
  13848. // allocate tensors / buffers and zero
  13849. cvec.ctxs.reserve(ctx_map.size());
  13850. cvec.bufs.reserve(ctx_map.size());
  13851. for (auto it : ctx_map) {
  13852. ggml_backend_buffer_type_t buft = it.first;
  13853. ggml_context * ctx = it.second;
  13854. ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
  13855. if (!buf) {
  13856. LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
  13857. return false;
  13858. }
  13859. ggml_backend_buffer_clear(buf, 0);
  13860. cvec.ctxs.push_back(ctx);
  13861. cvec.bufs.push_back(buf);
  13862. }
  13863. return true;
  13864. }
  13865. int32_t llama_control_vector_apply(struct llama_context * lctx, const float * data, size_t len, int32_t n_embd, int32_t il_start, int32_t il_end) {
  13866. const llama_model & model = lctx->model;
  13867. llama_control_vector & cvec = lctx->cvec;
  13868. if (data == nullptr) {
  13869. // disable the current control vector (but leave allocated for later)
  13870. cvec.layer_start = -1;
  13871. cvec.layer_end = -1;
  13872. return 0;
  13873. }
  13874. if (n_embd != (int) model.hparams.n_embd) {
  13875. LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
  13876. return 1;
  13877. }
  13878. if (cvec.tensors.empty()) {
  13879. if (!llama_control_vector_init(cvec, model)) {
  13880. return 1;
  13881. }
  13882. }
  13883. cvec.layer_start = il_start;
  13884. cvec.layer_end = il_end;
  13885. for (size_t il = 1; il < model.hparams.n_layer; il++) {
  13886. assert(cvec.tensors[il] != nullptr);
  13887. const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
  13888. if (off + n_embd <= len) {
  13889. ggml_backend_tensor_set(cvec.tensors[il], data + off, 0, n_embd * ggml_element_size(cvec.tensors[il]));
  13890. }
  13891. }
  13892. return 0;
  13893. }
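/** example (illustrative sketch): applying and later disabling a control vector. `cv_data` is
 * assumed to hold n_embd floats per layer for layers 1..n_layer-1, matching the per-layer
 * offsets used above (layer 0 never has a tensor).
 *
 *   const int32_t n_embd  = llama_n_embd (llama_get_model(lctx));
 *   const int32_t n_layer = llama_n_layer(llama_get_model(lctx));
 *   llama_control_vector_apply(lctx, cv_data, (size_t) n_embd * (n_layer - 1), n_embd, 1, n_layer);
 *   // ... generate ...
 *   llama_control_vector_apply(lctx, NULL, 0, n_embd, 0, 0);   // disable, keep buffers allocated
 */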
  13894. struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max) {
  13895. struct llama_kv_cache_view result = {
  13896. /*.n_cells = */ 0,
  13897. /*.n_seq_max = */ n_seq_max,
  13898. /*.token_count = */ 0,
  13899. /*.used_cells = */ llama_get_kv_cache_used_cells(ctx),
  13900. /*.max_contiguous = */ 0,
  13901. /*.max_contiguous_idx = */ -1,
  13902. /*.cells = */ nullptr,
  13903. /*.cells_sequences = */ nullptr,
  13904. };
  13905. return result;
  13906. }
  13907. void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
  13908. if (view->cells != nullptr) {
  13909. free(view->cells);
  13910. view->cells = nullptr;
  13911. }
  13912. if (view->cells_sequences != nullptr) {
  13913. free(view->cells_sequences);
  13914. view->cells_sequences = nullptr;
  13915. }
  13916. }
  13917. void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
  13918. if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
  13919. view->n_cells = int32_t(ctx->kv_self.size);
  13920. void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
  13921. GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
  13922. view->cells = (struct llama_kv_cache_view_cell *)p;
  13923. p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells);
  13924. GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
  13925. view->cells_sequences = (llama_seq_id *)p;
  13926. }
  13927. const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
  13928. llama_kv_cache_view_cell * c_curr = view->cells;
  13929. llama_seq_id * cs_curr = view->cells_sequences;
  13930. int32_t used_cells = 0;
  13931. int32_t token_count = 0;
  13932. int32_t curr_contig_idx = -1;
  13933. uint32_t max_contig = 0;
  13934. int32_t max_contig_idx = -1;
  13935. for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_seq_max) {
  13936. const size_t curr_size = kv_cells[i].seq_id.size();
  13937. token_count += curr_size;
  13938. c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;
  13939. if (curr_size > 0) {
  13940. if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
  13941. max_contig = i - curr_contig_idx;
  13942. max_contig_idx = curr_contig_idx;
  13943. }
  13944. curr_contig_idx = -1;
  13945. } else if (curr_contig_idx < 0) {
  13946. curr_contig_idx = i;
  13947. }
  13948. int seq_idx = 0;
  13949. for (const llama_seq_id it : kv_cells[i].seq_id) {
  13950. if (seq_idx >= view->n_seq_max) {
  13951. break;
  13952. }
  13953. cs_curr[seq_idx] = it;
  13954. seq_idx++;
  13955. }
  13956. if (seq_idx != 0) {
  13957. used_cells++;
  13958. }
  13959. for (; seq_idx < view->n_seq_max; seq_idx++) {
  13960. cs_curr[seq_idx] = -1;
  13961. }
  13962. }
  13963. if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
  13964. max_contig_idx = curr_contig_idx;
  13965. max_contig = kv_cells.size() - curr_contig_idx;
  13966. }
  13967. view->max_contiguous = max_contig;
  13968. view->max_contiguous_idx = max_contig_idx;
  13969. view->token_count = token_count;
  13970. view->used_cells = used_cells;
  13971. if (uint32_t(used_cells) != ctx->kv_self.used) {
  13972. LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
  13973. __func__, ctx->kv_self.used, used_cells);
  13974. }
  13975. }
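/** example (illustrative sketch): inspecting KV cache occupancy with the view API above.
 *
 *   struct llama_kv_cache_view view = llama_kv_cache_view_init(lctx, 1);
 *   llama_kv_cache_view_update(lctx, &view);
 *   printf("cells used: %d / %d, tokens: %d\n", view.used_cells, view.n_cells, view.token_count);
 *   llama_kv_cache_view_free(&view);
 */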
  13976. int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) {
  13977. int result = 0;
  13978. for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
  13979. result += ctx->kv_self.cells[i].seq_id.size();
  13980. }
  13981. return result;
  13982. }
  13983. int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
  13984. return ctx->kv_self.used;
  13985. }
  13986. void llama_kv_cache_clear(struct llama_context * ctx) {
  13987. llama_kv_cache_clear(ctx->kv_self);
  13988. }
  13989. bool llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
  13990. return llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
  13991. }
  13992. void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
  13993. if (seq_id_src == seq_id_dst) {
  13994. return;
  13995. }
  13996. llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
  13997. }
  13998. void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
  13999. llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
  14000. }
  14001. void llama_kv_cache_seq_add(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
  14002. if (delta == 0) {
  14003. return;
  14004. }
  14005. llama_kv_cache_seq_add(ctx->kv_self, seq_id, p0, p1, delta);
  14006. }
  14007. void llama_kv_cache_seq_div(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
  14008. if (d == 1) {
  14009. return;
  14010. }
  14011. llama_kv_cache_seq_div(ctx->kv_self, seq_id, p0, p1, d);
  14012. }
  14013. llama_pos llama_kv_cache_seq_pos_max(struct llama_context * ctx, llama_seq_id seq_id) {
  14014. return llama_kv_cache_seq_pos_max(ctx->kv_self, seq_id);
  14015. }
  14016. void llama_kv_cache_defrag(struct llama_context * ctx) {
  14017. llama_kv_cache_defrag(ctx->kv_self);
  14018. }
  14019. void llama_kv_cache_update(struct llama_context * ctx) {
  14020. llama_kv_cache_update_internal(*ctx);
  14021. }
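/** example (illustrative sketch): a typical context-shift using the sequence wrappers above,
 * discarding n_discard tokens after the first n_keep for sequence 0; n_keep and n_discard are
 * hypothetical, and p1 < 0 means "to the end of the sequence".
 *
 *   llama_kv_cache_seq_rm (lctx, 0, n_keep, n_keep + n_discard);
 *   llama_kv_cache_seq_add(lctx, 0, n_keep + n_discard, -1, -n_discard);
 *   llama_kv_cache_defrag (lctx);   // optional: compact the cache
 *   llama_kv_cache_update (lctx);   // apply any pending K-shift / defrag
 */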
  14022. // deprecated
  14023. size_t llama_get_state_size(const struct llama_context * ctx) {
  14024. return llama_state_get_size(ctx);
  14025. }
  14026. // deprecated
  14027. size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
  14028. return llama_state_get_data(ctx, dst);
  14029. }
  14030. // deprecated
  14031. size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
  14032. return llama_state_set_data(ctx, src);
  14033. }
  14034. // deprecated
  14035. bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
  14036. return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
  14037. }
  14038. // deprecated
  14039. bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
  14040. return llama_state_save_file(ctx, path_session, tokens, n_token_count);
  14041. }
  14042. // Returns the *maximum* size of the state
  14043. size_t llama_state_get_size(const struct llama_context * ctx) {
  14044. const auto & cparams = ctx->cparams;
  14045. const auto & hparams = ctx->model.hparams;
14046. // we don't know the size of the rng state until we actually serialize it, so reserve more than enough memory for its serialized state.
  14047. // for reference, std::mt19937(1337) serializes to 6701 bytes.
  14048. const size_t s_rng_size = sizeof(size_t);
  14049. const size_t s_rng = LLAMA_MAX_RNG_STATE;
  14050. const size_t s_n_outputs = sizeof(size_t);
  14051. // assume worst case for outputs although only currently set ones are serialized
  14052. const size_t s_output_pos = ctx->cparams.n_batch * sizeof(int32_t);
  14053. const size_t s_logits_size = sizeof(size_t);
  14054. const size_t s_logits = ctx->logits_size ? cparams.n_batch * hparams.n_vocab * sizeof(float) : 0;
  14055. const size_t s_embedding_size = sizeof(size_t);
  14056. const size_t s_embedding = ctx->embd_size ? cparams.n_batch * hparams.n_embd * sizeof(float) : 0;
  14057. const size_t s_kv_buf_size = sizeof(size_t);
  14058. const size_t s_kv_head = sizeof(uint32_t);
  14059. const size_t s_kv_size = sizeof(uint32_t);
  14060. const size_t s_kv_used = sizeof(uint32_t);
  14061. const size_t s_v_trans = sizeof(uint32_t);
  14062. const size_t s_kv = ctx->kv_self.total_size();
  14063. const size_t s_kv_cell = sizeof(llama_pos) + sizeof(size_t) + cparams.n_seq_max*sizeof(llama_seq_id);
  14064. const size_t s_kv_cells = ctx->kv_self.size * s_kv_cell;
  14065. const size_t s_total = (
  14066. + s_rng_size
  14067. + s_rng
  14068. + s_n_outputs
  14069. + s_output_pos
  14070. + s_logits_size
  14071. + s_logits
  14072. + s_embedding_size
  14073. + s_embedding
  14074. + s_kv_buf_size
  14075. + s_kv_head
  14076. + s_kv_size
  14077. + s_kv_used
  14078. + s_v_trans
  14079. + s_kv
  14080. + s_kv_cells
  14081. );
  14082. // on session change it is very likely that the state size has changed - so we need to update this function
  14083. static_assert(LLAMA_SESSION_VERSION == 6, "So you just bumped the session version - good. But did you remember to update llama_state_get_size?");
  14084. return s_total;
  14085. }
  14086. // llama_context_data
  14087. struct llama_data_context {
  14088. virtual void write(const void * src, size_t size) = 0;
  14089. virtual size_t get_size_written() = 0;
  14090. virtual ~llama_data_context() = default;
  14091. };
  14092. struct llama_data_buffer_context : llama_data_context {
  14093. uint8_t * ptr;
  14094. size_t size_written = 0;
  14095. llama_data_buffer_context(uint8_t * p) : ptr(p) {}
  14096. void write(const void * src, size_t size) override {
  14097. memcpy(ptr, src, size);
  14098. ptr += size;
  14099. size_written += size;
  14100. }
  14101. size_t get_size_written() override {
  14102. return size_written;
  14103. }
  14104. };
  14105. struct llama_data_file_context : llama_data_context {
  14106. llama_file * file;
  14107. size_t size_written = 0;
  14108. llama_data_file_context(llama_file * f) : file(f) {}
  14109. void write(const void * src, size_t size) override {
  14110. file->write_raw(src, size);
  14111. size_written += size;
  14112. }
  14113. size_t get_size_written() override {
  14114. return size_written;
  14115. }
  14116. };
14117. /** copy state data into either a buffer or a file, depending on the passed-in context
  14118. *
  14119. * file context:
  14120. * llama_file file("/path", "wb");
  14121. * llama_data_file_context data_ctx(&file);
  14122. * llama_state_get_data(ctx, &data_ctx);
  14123. *
  14124. * buffer context:
  14125. * std::vector<uint8_t> buf(max_size, 0);
14126. * llama_data_buffer_context data_ctx(buf.data());
  14127. * llama_state_get_data(ctx, &data_ctx);
  14128. *
  14129. */
  14130. static void llama_state_get_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
  14131. llama_synchronize(ctx);
  14132. // copy rng
  14133. {
  14134. std::ostringstream rng_ss;
  14135. rng_ss << ctx->rng;
  14136. const std::string & rng_str = rng_ss.str();
  14137. const size_t rng_size = rng_str.size();
  14138. GGML_ASSERT(rng_size <= LLAMA_MAX_RNG_STATE);
  14139. data_ctx->write(&rng_size, sizeof(rng_size));
  14140. data_ctx->write(rng_str.data(), rng_size);
  14141. }
  14142. // copy outputs
  14143. {
  14144. // Can't use ctx->n_outputs because it's not for the
  14145. // entire last batch when n_ubatch is smaller than n_batch
  14146. size_t n_outputs = 0;
  14147. // copy output ids
  14148. {
  14149. std::vector<int32_t> output_pos;
  14150. const size_t n_batch = ctx->cparams.n_batch;
  14151. const auto & output_ids = ctx->output_ids;
  14152. output_pos.resize(ctx->output_size);
  14153. // build a more compact representation of the output ids
  14154. for (size_t i = 0; i < n_batch; ++i) {
  14155. // map an output id to a position in the batch
  14156. int32_t pos = output_ids[i];
  14157. if (pos >= 0) {
  14158. if ((size_t) pos >= n_outputs) {
  14159. n_outputs = pos + 1;
  14160. }
  14161. GGML_ASSERT((size_t) pos < ctx->output_size);
  14162. output_pos[pos] = i;
  14163. }
  14164. }
  14165. data_ctx->write(&n_outputs, sizeof(n_outputs));
  14166. if (n_outputs) {
  14167. data_ctx->write(output_pos.data(), n_outputs * sizeof(int32_t));
  14168. }
  14169. }
  14170. // copy logits
  14171. {
  14172. const size_t logits_size = std::min(ctx->logits_size, n_outputs * ctx->model.hparams.n_vocab);
  14173. data_ctx->write(&logits_size, sizeof(logits_size));
  14174. if (logits_size) {
  14175. data_ctx->write(ctx->logits, logits_size * sizeof(float));
  14176. }
  14177. }
  14178. // copy embeddings
  14179. {
  14180. const size_t embeddings_size = std::min(ctx->embd_size, n_outputs * ctx->model.hparams.n_embd);
  14181. data_ctx->write(&embeddings_size, sizeof(embeddings_size));
  14182. if (embeddings_size) {
  14183. data_ctx->write(ctx->embd, embeddings_size * sizeof(float));
  14184. }
  14185. }
  14186. }
  14187. // copy kv cache
  14188. {
  14189. const auto & kv_self = ctx->kv_self;
  14190. const auto & hparams = ctx->model.hparams;
  14191. const uint32_t n_layer = hparams.n_layer;
  14192. const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
  14193. const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();
  14194. // NOTE: kv_size and kv_buf_size are mostly used for sanity checks
  14195. const uint32_t kv_head = llama_kv_cache_cell_max(kv_self);
  14196. const uint32_t kv_size = kv_self.size;
  14197. const size_t kv_buf_size = kv_self.total_size() / (kv_size ? kv_size : 1) * kv_head;
  14198. const uint32_t kv_used = kv_self.used;
  14199. const uint32_t v_trans = kv_self.v_trans ? 1 : 0;
  14200. data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
  14201. data_ctx->write(&kv_head, sizeof(kv_head));
  14202. data_ctx->write(&kv_size, sizeof(kv_size));
  14203. data_ctx->write(&kv_used, sizeof(kv_used));
  14204. data_ctx->write(&v_trans, sizeof(v_trans));
  14205. if (kv_buf_size) {
  14206. const size_t pre_kv_buf_size = data_ctx->get_size_written();
  14207. std::vector<uint8_t> tmp_buf;
  14208. for (int il = 0; il < (int) n_layer; ++il) {
  14209. const size_t k_size = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*kv_head);
  14210. tmp_buf.resize(k_size);
  14211. ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), 0, tmp_buf.size());
  14212. data_ctx->write(tmp_buf.data(), tmp_buf.size());
  14213. if (kv_self.recurrent || !kv_self.v_trans) {
  14214. // v is contiguous for recurrent models
  14215. // TODO: use other tensors for state models than k and v
  14216. const size_t v_size = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*kv_head);
  14217. tmp_buf.resize(v_size);
  14218. ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), 0, tmp_buf.size());
  14219. data_ctx->write(tmp_buf.data(), tmp_buf.size());
  14220. continue;
  14221. }
  14222. // v is not contiguous, copy row by row
  14223. const size_t v_row_size = ggml_row_size(kv_self.v_l[il]->type, kv_head);
  14224. const size_t v_row_stride = ggml_row_size(kv_self.v_l[il]->type, kv_size);
  14225. tmp_buf.resize(v_row_size);
  14226. for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) {
  14227. ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), ir*v_row_stride, tmp_buf.size());
  14228. data_ctx->write(tmp_buf.data(), tmp_buf.size());
  14229. }
  14230. }
  14231. GGML_ASSERT(kv_buf_size == data_ctx->get_size_written() - pre_kv_buf_size);
  14232. }
  14233. for (uint32_t i = 0; i < kv_head; ++i) {
  14234. const auto & cell = kv_self.cells[i];
  14235. const llama_pos pos = cell.pos;
  14236. const size_t seq_id_size = cell.seq_id.size();
  14237. data_ctx->write(&pos, sizeof(pos));
  14238. data_ctx->write(&seq_id_size, sizeof(seq_id_size));
  14239. for (auto seq_id : cell.seq_id) {
  14240. data_ctx->write(&seq_id, sizeof(seq_id));
  14241. }
  14242. }
  14243. }
  14244. }
  14245. size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst) {
  14246. llama_data_buffer_context data_ctx(dst);
  14247. llama_state_get_data_internal(ctx, &data_ctx);
  14248. return data_ctx.get_size_written();
  14249. }
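/** example (illustrative sketch): snapshotting and restoring the context state in memory.
 * llama_state_get_size() returns an upper bound; the number of bytes actually written is
 * returned by llama_state_get_data().
 *
 *   std::vector<uint8_t> state(llama_state_get_size(lctx));
 *   const size_t n_written = llama_state_get_data(lctx, state.data());
 *   // ... later, on a context created with the same model and params ...
 *   llama_state_set_data(lctx, state.data());
 */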
  14250. // Sets the state reading from the specified source address
  14251. size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src) {
  14252. llama_synchronize(ctx);
  14253. const uint8_t * inp = src;
  14254. // set rng
  14255. {
  14256. size_t rng_size;
  14257. memcpy(&rng_size, inp, sizeof(rng_size)); inp += sizeof(rng_size);
  14258. GGML_ASSERT(rng_size <= LLAMA_MAX_RNG_STATE);
  14259. std::string rng_str((const char *)inp, rng_size); inp += rng_size;
  14260. std::istringstream rng_ss(rng_str);
  14261. rng_ss >> ctx->rng;
  14262. GGML_ASSERT(!rng_ss.fail());
  14263. }
  14264. // set output ids
  14265. {
  14266. size_t n_outputs;
  14267. std::vector<int32_t> output_pos;
  14268. memcpy(&n_outputs, inp, sizeof(n_outputs)); inp += sizeof(n_outputs);
  14269. GGML_ASSERT(n_outputs <= llama_output_reserve(*ctx, n_outputs));
  14270. if (n_outputs) {
  14271. output_pos.resize(n_outputs);
  14272. memcpy(output_pos.data(), inp, n_outputs * sizeof(int32_t));
  14273. inp += n_outputs * sizeof(int32_t);
  14274. for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) {
  14275. int32_t id = output_pos[i];
  14276. GGML_ASSERT((uint32_t) id < ctx->cparams.n_batch);
  14277. ctx->output_ids[id] = i;
  14278. }
  14279. ctx->n_outputs = n_outputs;
  14280. }
  14281. }
  14282. // set logits
  14283. {
  14284. size_t logits_size;
  14285. memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);
        GGML_ASSERT(ctx->logits_size >= logits_size);

        if (logits_size) {
            memcpy(ctx->logits, inp, logits_size * sizeof(float));
            inp += logits_size * sizeof(float);
        }
    }

    // set embeddings
    {
        size_t embeddings_size;

        memcpy(&embeddings_size, inp, sizeof(embeddings_size)); inp += sizeof(embeddings_size);

        GGML_ASSERT(ctx->embd_size >= embeddings_size);

        if (embeddings_size) {
            memcpy(ctx->embd, inp, embeddings_size * sizeof(float));
            inp += embeddings_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;

        const uint32_t n_layer      = hparams.n_layer;
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();

        size_t   kv_buf_size;
        uint32_t kv_head;
        uint32_t kv_size;
        uint32_t kv_used;
        uint32_t v_trans;

        memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
        memcpy(&kv_head,     inp, sizeof(kv_head));     inp += sizeof(kv_head);
        memcpy(&kv_size,     inp, sizeof(kv_size));     inp += sizeof(kv_size);
        memcpy(&kv_used,     inp, sizeof(kv_used));     inp += sizeof(kv_used);
        memcpy(&v_trans,     inp, sizeof(v_trans));     inp += sizeof(v_trans);

        GGML_ASSERT(kv_self.v_trans == (bool) v_trans); // incompatible V transposition

        if (kv_self.size != kv_size) {
            // the KV cache needs to be big enough to load all the KV cells from the saved state
            GGML_ASSERT(kv_self.size >= kv_head);

            LLAMA_LOG_INFO("%s: state contains %d KV cells, was saved with kv_size=%d, but is loaded with kv_size=%d (fine, but different)\n",
                __func__, kv_head, kv_size, kv_self.size);
        }

        llama_kv_cache_clear(ctx);

        if (kv_buf_size) {
            const size_t pre_kv_buf_size = inp - src;

            GGML_ASSERT(kv_self.total_size() >= kv_buf_size);

            for (int il = 0; il < (int) n_layer; ++il) {
                const size_t k_size = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*kv_head);

                ggml_backend_tensor_set(kv_self.k_l[il], inp, 0, k_size);
                inp += k_size;

                if (kv_self.recurrent || !kv_self.v_trans) {
                    // v is contiguous for recurrent models
                    // TODO: use other tensors for state models than k and v
                    const size_t v_size = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*kv_head);

                    ggml_backend_tensor_set(kv_self.v_l[il], inp, 0, v_size);
                    inp += v_size;
                    continue;
                }

                // v is not contiguous, copy row by row
                const size_t v_row_size   = ggml_row_size(kv_self.v_l[il]->type, kv_head);
                const size_t v_row_stride = ggml_row_size(kv_self.v_l[il]->type, kv_self.size);

                for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) {
                    ggml_backend_tensor_set(kv_self.v_l[il], inp, ir*v_row_stride, v_row_size);
                    inp += v_row_size;
                }
            }

            GGML_ASSERT(kv_buf_size == inp - src - pre_kv_buf_size);
        }

        ctx->kv_self.head = kv_head;
        ctx->kv_self.used = kv_used;

        for (uint32_t i = 0; i < kv_head; ++i) {
            llama_pos pos;
            size_t    seq_id_size;

            memcpy(&pos,         inp, sizeof(pos));         inp += sizeof(pos);
            memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);

            ctx->kv_self.cells[i].pos = pos;

            llama_seq_id seq_id;

            for (size_t j = 0; j < seq_id_size; ++j) {
                memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
                ctx->kv_self.cells[i].seq_id.insert(seq_id);
            }
        }
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_state_get_size(ctx);

    GGML_ASSERT(nread <= max_size);

    return nread;
}
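// Illustrative sketch (not part of the library): round-tripping the full context state
// through a caller-owned buffer. llama_state_get_size/llama_state_get_data are the
// counterparts defined earlier in this file; the buffer and error handling are assumptions.
//
//   std::vector<uint8_t> state(llama_state_get_size(ctx));
//   llama_state_get_data(ctx, state.data());
//   // ... later, on a context created from the same model with a compatible KV cache:
//   llama_state_set_data(ctx, state.data());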
static bool llama_state_load_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_state_get_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_state_set_data(ctx, state_data.data());
    }

    return true;
}

bool llama_state_load_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_state_load_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
        return false;
    }
}
static bool llama_state_save_file_internal(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_state_get_data_internal(ctx, &data_ctx);

    return true;
}

bool llama_state_save_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    try {
        return llama_state_save_file_internal(ctx, path_session, tokens, n_token_count);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error saving session file: %s\n", err.what());
        return false;
    }
}
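// Illustrative sketch (assumptions: a std::vector<llama_token> "tokens" holding the prompt
// and a file name of the caller's choosing) showing the save/load pair defined above:
//
//   llama_state_save_file(ctx, "session.bin", tokens.data(), tokens.size());
//
//   std::vector<llama_token> prompt(n_ctx);
//   size_t n_loaded = 0;
//   if (!llama_state_load_file(ctx, "session.bin", prompt.data(), prompt.size(), &n_loaded)) {
//       // magic/version/hparams mismatch or oversized state -> start from scratch
//   }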
size_t llama_state_seq_get_size(struct llama_context* ctx, llama_seq_id seq_id) {
    // save the size of size_t as a uint32_t for safety check
    const size_t size_t_size_size = sizeof(uint32_t);

    // other values
    const size_t s_cell_count_size = sizeof(uint32_t);
    const size_t s_layer_count_size = sizeof(uint32_t);
    const size_t n_embd_v_gqa_size = sizeof(uint32_t);

    size_t s_cell_count = 0;
    size_t s_cell_data_size = 0;

    const auto & kv_self = ctx->kv_self;
    const auto & hparams = ctx->model.hparams;

    const uint32_t n_layer = hparams.n_layer;
    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();

    for (uint32_t i = 0; i < kv_self.size; ++i) {
        const auto & cell = kv_self.cells[i];
        if (cell.seq_id.count(seq_id) > 0) {
            ++s_cell_count;
            s_cell_data_size += sizeof(llama_pos);
        }
    }

    for (int il = 0; il < (int)n_layer; ++il) {
        // types of keys and values
        s_cell_data_size += sizeof(int32_t) * 2;
        // k_size_row and v_size_el values of layer
        s_cell_data_size += sizeof(size_t) * 2;

        // keys
        const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
        s_cell_data_size += k_size_row * s_cell_count;

        // values (transposed)
        const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
        s_cell_data_size += v_size_el * s_cell_count * n_embd_v_gqa;
    }

    const size_t s_total = (
        size_t_size_size +
        s_cell_count_size +
        s_layer_count_size +
        n_embd_v_gqa_size +
        s_cell_data_size
    );

    return s_total;
}
static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_context & data_ctx, llama_seq_id seq_id) {
    llama_synchronize(ctx);

    const auto & kv_self = ctx->kv_self;
    GGML_ASSERT(!kv_self.recurrent); // not implemented

    // Save the size of size_t as a uint32_t for safety check
    const uint32_t size_t_size = sizeof(size_t);
    data_ctx.write(&size_t_size, sizeof(size_t_size));

    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
    uint32_t cell_count = 0;

    // Count the number of cells with the specified seq_id
    // Find all the ranges of cells with this seq id
    {
        uint32_t cell_range_begin = kv_self.size;
        for (uint32_t i = 0; i < kv_self.size; ++i) {
            const auto & cell = kv_self.cells[i];
            if (cell.has_seq_id(seq_id)) {
                ++cell_count;
                if (cell_range_begin == kv_self.size) {
                    cell_range_begin = i;
                }
            } else {
                if (cell_range_begin != kv_self.size) {
                    cell_ranges.emplace_back(cell_range_begin, i);
                    cell_range_begin = kv_self.size;
                }
            }
        }
        if (cell_range_begin != kv_self.size) {
            cell_ranges.emplace_back(cell_range_begin, kv_self.size);
        }

        // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
        uint32_t cell_count_check = 0;
        for (const auto & range : cell_ranges) {
            cell_count_check += range.second - range.first;
        }
        GGML_ASSERT(cell_count == cell_count_check);
    }

    // Write the cell count
    data_ctx.write(&cell_count, sizeof(cell_count));

    const auto & hparams = ctx->model.hparams;
    const uint32_t n_layer = hparams.n_layer;
    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();

    // Write the layer count
    data_ctx.write(&n_layer, sizeof(n_layer));

    // Write n_embd_v_gqa
    data_ctx.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));

    // Iterate the ranges and write all the pos (this is the token position in the prompt)
    for (const auto & range : cell_ranges) {
        for (uint32_t i = range.first; i < range.second; ++i) {
            const auto & cell = kv_self.cells[i];
            data_ctx.write(&cell.pos, sizeof(cell.pos));
        }
    }

    // Iterate and write all the keys first, each row is a cell
    // Get whole range at a time
    std::vector<uint8_t> tmp_buf;
    for (int il = 0; il < (int)n_layer; ++il) {
        // Write key type
        const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
        data_ctx.write(&k_type_i, sizeof(k_type_i));

        // Write row size of key
        const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
        data_ctx.write(&k_size_row, sizeof(k_size_row));

        // Read each range of cells of k_size length each into tmp_buf and write out
        for (const auto & range : cell_ranges) {
            const size_t range_size = range.second - range.first;
            tmp_buf.resize(range_size * k_size_row);
            ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), range.first * k_size_row, range_size * k_size_row);
            data_ctx.write(tmp_buf.data(), tmp_buf.size());
        }
    }

    // TODO: simplify, reduce copy-paste
    if (!kv_self.v_trans) {
        for (int il = 0; il < (int)n_layer; ++il) {
            // Write value type
            const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
            data_ctx.write(&v_type_i, sizeof(v_type_i));

            // Write row size of value
            const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
            data_ctx.write(&v_size_row, sizeof(v_size_row));

            // Read each range of cells of v_size length each into tmp_buf and write out
            for (const auto & range : cell_ranges) {
                const size_t range_size = range.second - range.first;
                tmp_buf.resize(range_size * v_size_row);
                ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), range.first * v_size_row, range_size * v_size_row);
                data_ctx.write(tmp_buf.data(), tmp_buf.size());
            }
        }
    } else {
        // For the values, they are transposed, so we also need the element size and get the element ranges from each row
        const uint32_t kv_size = kv_self.size;
        for (int il = 0; il < (int)n_layer; ++il) {
            // Write value type
            const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
            data_ctx.write(&v_type_i, sizeof(v_type_i));

            // Write element size
            const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
            data_ctx.write(&v_size_el, sizeof(v_size_el));

            // For each row, we get the element values of each cell
            for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                // Read each range of cells of v_size_el length each into tmp_buf and write out
                for (const auto & range : cell_ranges) {
                    const size_t range_size = range.second - range.first;
                    const size_t src_offset = (range.first + j * kv_size) * v_size_el;
                    tmp_buf.resize(range_size * v_size_el);
                    ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), src_offset, tmp_buf.size());
                    data_ctx.write(tmp_buf.data(), tmp_buf.size());
                }
            }
        }
    }

    return data_ctx.get_size_written();
}

size_t llama_state_seq_get_data(struct llama_context* ctx, uint8_t* dst, llama_seq_id seq_id) {
    llama_data_buffer_context data_ctx(dst);
    return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
}
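// Illustrative sketch: serializing a single sequence into a caller-owned buffer.
// llama_state_seq_get_size (above) is sized from the current cache contents, so the
// buffer is allocated first and the actual number of bytes written is returned here.
//
//   std::vector<uint8_t> seq_buf(llama_state_seq_get_size(ctx, seq_id));
//   const size_t n_written = llama_state_seq_get_data(ctx, seq_buf.data(), seq_id);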
size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, llama_seq_id dest_seq_id) {
    llama_synchronize(ctx);

    auto & kv_self = ctx->kv_self;
    GGML_ASSERT(!kv_self.recurrent); // not implemented

    // Wipe the slot
    llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);

    const uint8_t * inp = src;

    // Read size of size_t
    uint32_t size_t_size;
    memcpy(&size_t_size, inp, sizeof(size_t_size));
    inp += sizeof(size_t_size);
    if (size_t_size != sizeof(size_t)) {
        LLAMA_LOG_ERROR("%s: size_t size mismatch\n", __func__);
        return 0;
    }

    // Read the cell count
    uint32_t cell_count;
    memcpy(&cell_count, inp, sizeof(cell_count));
    inp += sizeof(cell_count);

    // Read the layer count
    uint32_t n_layer_ref;
    memcpy(&n_layer_ref, inp, sizeof(n_layer_ref));
    inp += sizeof(n_layer_ref);

    // Read n_embd_v_gqa
    uint32_t n_embd_v_gqa_ref;
    memcpy(&n_embd_v_gqa_ref, inp, sizeof(n_embd_v_gqa_ref));
    inp += sizeof(n_embd_v_gqa_ref);

    // Sanity check model compatibility
    const auto & hparams = ctx->model.hparams;
    const uint32_t n_layer = hparams.n_layer;
    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa() + hparams.n_embd_k_s();
    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa() + hparams.n_embd_v_s();
    if (n_layer != n_layer_ref) {
        LLAMA_LOG_ERROR("%s: mismatched n_layer (%d != %d)\n", __func__, n_layer, n_layer_ref);
        return 0;
    }
    if (n_embd_v_gqa != n_embd_v_gqa_ref) {
        LLAMA_LOG_ERROR("%s: mismatched n_embd_v_gqa (%d != %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref);
        return 0;
    }

    // Allocate the new cells for the slot
    if (cell_count) {
        llama_batch batch = llama_batch_init(cell_count, 0, 1);
        batch.n_tokens = cell_count;
        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            memcpy(&pos, inp, sizeof(pos));
            inp += sizeof(pos);

            batch.pos[i] = pos;
            batch.n_seq_id[i] = 1;
            batch.seq_id[i][0] = dest_seq_id;
        }
        if (!llama_kv_cache_find_slot(kv_self, batch)) {
            llama_batch_free(batch);
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return 0;
        }

        // DEBUG CHECK: kv_self.head should be our first cell, kv_self.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
        // Assume that this is one contiguous block of cells
        GGML_ASSERT(kv_self.head + cell_count <= kv_self.size);
        GGML_ASSERT(kv_self.cells[kv_self.head].pos == batch.pos[0]);
        GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].pos == batch.pos[cell_count - 1]);
        GGML_ASSERT(kv_self.cells[kv_self.head].has_seq_id(dest_seq_id));
        GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].has_seq_id(dest_seq_id));

        // Cleanup
        llama_batch_free(batch);
    }

    const uint32_t kv_size = kv_self.size;
    const uint32_t kv_head = kv_self.head;

    // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
    for (int il = 0; il < (int)n_layer; ++il) {
        // Read type of key
        int32_t k_type_i_ref;
        memcpy(&k_type_i_ref, inp, sizeof(k_type_i_ref));
        inp += sizeof(k_type_i_ref);
        const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
        if (k_type_i != k_type_i_ref) {
            llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
            LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
            return 0;
        }

        // Read row size of key
        size_t k_size_row_ref;
        memcpy(&k_size_row_ref, inp, sizeof(k_size_row_ref));
        inp += sizeof(k_size_row_ref);
        const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
        if (k_size_row != k_size_row_ref) {
            llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
            LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, k_size_row_ref, il);
            return 0;
        }

        if (cell_count) {
            // Read and set the keys for the whole cell range
            ggml_backend_tensor_set(kv_self.k_l[il], inp, kv_head * k_size_row, cell_count * k_size_row);
            inp += cell_count * k_size_row;
        }
    }

    // TODO: simplify, reduce copy-paste
    if (!kv_self.v_trans) {
        for (int il = 0; il < (int)n_layer; ++il) {
            // Read type of value
            int32_t v_type_i_ref;
            memcpy(&v_type_i_ref, inp, sizeof(v_type_i_ref));
            inp += sizeof(v_type_i_ref);
            const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
            if (v_type_i != v_type_i_ref) {
                llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return 0;
            }

            // Read row size of value
            size_t v_size_row_ref;
            memcpy(&v_size_row_ref, inp, sizeof(v_size_row_ref));
            inp += sizeof(v_size_row_ref);
            const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
            if (v_size_row != v_size_row_ref) {
                llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
                LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, v_size_row_ref, il);
                return 0;
            }

            if (cell_count) {
                // Read and set the values for the whole cell range
                ggml_backend_tensor_set(kv_self.v_l[il], inp, kv_head * v_size_row, cell_count * v_size_row);
                inp += cell_count * v_size_row;
            }
        }
    } else {
        // For each layer, read the values for each cell (transposed)
        for (int il = 0; il < (int)n_layer; ++il) {
            // Read type of value
            int32_t v_type_i_ref;
            memcpy(&v_type_i_ref, inp, sizeof(v_type_i_ref));
            inp += sizeof(v_type_i_ref);
            const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
            if (v_type_i != v_type_i_ref) {
                llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return 0;
            }

            // Read element size of value
            size_t v_size_el_ref;
            memcpy(&v_size_el_ref, inp, sizeof(v_size_el_ref));
            inp += sizeof(v_size_el_ref);
            const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
            if (v_size_el != v_size_el_ref) {
                llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
                LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, v_size_el_ref, il);
                return 0;
            }

            if (cell_count) {
                // For each row in the transposed matrix, read the values for the whole cell range
                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    const size_t dst_offset = (kv_head + j * kv_size) * v_size_el;
                    ggml_backend_tensor_set(kv_self.v_l[il], inp, dst_offset, cell_count * v_size_el);
                    inp += cell_count * v_size_el;
                }
            }
        }
    }

    const size_t nread = inp - src;
    return nread;
}
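// Illustrative sketch: copying one sequence between two contexts created from the same
// model (the layer count, GQA widths and KV cache types must match, otherwise the call
// returns 0 and the destination sequence is wiped). ctx_src/ctx_dst are assumptions.
//
//   const size_t n = llama_state_seq_get_data(ctx_src, seq_buf.data(), seq_id_src);
//   const size_t m = llama_state_seq_set_data(ctx_dst, seq_buf.data(), seq_id_dst);
//   // m == 0 -> the destination rejected the data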
static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
    llama_file file(filepath, "wb");

    file.write_u32(LLAMA_STATE_SEQ_MAGIC);
    file.write_u32(LLAMA_STATE_SEQ_VERSION);

    // save the prompt
    file.write_u32((uint32_t)n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);

    const size_t res = file.tell();
    GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + data_ctx.get_size_written());
    return res;
}

static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(filepath, "rb");

    // version checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) {
            LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version);
            return 0;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return 0;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t state_size = file.size - file.tell();
        std::vector<uint8_t> state_data(state_size);
        file.read_raw(state_data.data(), state_size);
        const size_t nread = llama_state_seq_set_data(ctx, state_data.data(), dest_seq_id);
        if (!nread) {
            LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__);
            return 0;
        }
        GGML_ASSERT(nread <= state_size);
        GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell());
    }

    return file.tell();
}

size_t llama_state_seq_save_file(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
    try {
        return llama_state_seq_save_file_internal(ctx, filepath, seq_id, tokens, n_token_count);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error saving sequence state file: %s\n", err.what());
        return 0;
    }
}

size_t llama_state_seq_load_file(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_state_seq_load_file_internal(ctx, filepath, dest_seq_id, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading sequence state file: %s\n", err.what());
        return 0;
    }
}
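// Illustrative sketch: persisting a single sequence (e.g. a cached prompt) to disk and
// restoring it into a possibly different sequence id. File name and sizes are assumptions.
//
//   llama_state_seq_save_file(ctx, "seq0.bin", 0, tokens.data(), tokens.size());
//
//   std::vector<llama_token> prompt(n_ctx);
//   size_t n_loaded = 0;
//   llama_state_seq_load_file(ctx, "seq0.bin", /*dest_seq_id =*/ 1, prompt.data(), prompt.size(), &n_loaded);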
void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch) {
    ctx->cparams.n_threads       = n_threads;
    ctx->cparams.n_threads_batch = n_threads_batch;
}

uint32_t llama_n_threads(struct llama_context * ctx) {
    return ctx->cparams.n_threads;
}

uint32_t llama_n_threads_batch(struct llama_context * ctx) {
    return ctx->cparams.n_threads_batch;
}

void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) {
    ctx->abort_callback      = abort_callback;
    ctx->abort_callback_data = abort_callback_data;
}

void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) {
    ctx->cparams.causal_attn = causal_attn;
}
struct llama_batch llama_batch_get_one(
        llama_token * tokens,
        int32_t       n_tokens,
        llama_pos     pos_0,
        llama_seq_id  seq_id) {
    return {
        /*n_tokens   =*/ n_tokens,
        /*tokens     =*/ tokens,
        /*embd       =*/ nullptr,
        /*pos        =*/ nullptr,
        /*n_seq_id   =*/ nullptr,
        /*seq_id     =*/ nullptr,
        /*logits     =*/ nullptr,
        /*all_pos_0  =*/ pos_0,
        /*all_pos_1  =*/ 1,
        /*all_seq_id =*/ seq_id,
    };
}

struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
    llama_batch batch = { 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, 0, 0, };

    if (embd) {
        batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
    } else {
        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
    }

    batch.pos      = (llama_pos *)     malloc(sizeof(llama_pos)      * n_tokens_alloc);
    batch.n_seq_id = (int32_t *)       malloc(sizeof(int32_t)        * n_tokens_alloc);
    batch.seq_id   = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
    for (int i = 0; i < n_tokens_alloc; ++i) {
        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
    }
    batch.seq_id[n_tokens_alloc] = nullptr;
    batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens_alloc);

    return batch;
}

void llama_batch_free(struct llama_batch batch) {
    if (batch.token)    free(batch.token);
    if (batch.embd)     free(batch.embd);
    if (batch.pos)      free(batch.pos);
    if (batch.n_seq_id) free(batch.n_seq_id);
    if (batch.seq_id) {
        for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
            free(batch.seq_id[i]);
        }
        free(batch.seq_id);
    }
    if (batch.logits)   free(batch.logits);
}
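// Illustrative sketch of the batch lifecycle (the token vector and sequence id 0 are
// assumptions; only the last token requests logits here):
//
//   llama_batch batch = llama_batch_init(512, /*embd =*/ 0, /*n_seq_max =*/ 1);
//   batch.n_tokens = (int32_t) tokens.size();
//   for (int32_t i = 0; i < batch.n_tokens; ++i) {
//       batch.token[i]     = tokens[i];
//       batch.pos[i]       = i;
//       batch.n_seq_id[i]  = 1;
//       batch.seq_id[i][0] = 0;
//       batch.logits[i]    = (i == batch.n_tokens - 1);
//   }
//   llama_decode(ctx, batch);
//   llama_batch_free(batch);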
int32_t llama_decode(
        struct llama_context * ctx,
        struct llama_batch     batch) {
    const int ret = llama_decode_internal(*ctx, batch);
    if (ret < 0) {
        LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
    }

    return ret;
}

void llama_synchronize(struct llama_context * ctx) {
    ggml_backend_sched_synchronize(ctx->sched);

    // FIXME: if multiple single tokens are evaluated without a synchronization,
    // the stats will be added to the prompt evaluation stats
    // this should only happen when using batch size 1 to evaluate a batch

    // add the evaluation to the stats
    if (ctx->n_queued_tokens == 1) {
        ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us;
        ctx->n_eval++;
    } else if (ctx->n_queued_tokens > 1) {
        ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us;
        ctx->n_p_eval += ctx->n_queued_tokens;
    }

    // get a more accurate load time, upon first eval
    if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) {
        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
        ctx->has_evaluated_once = true;
    }

    ctx->n_queued_tokens = 0;
    ctx->t_compute_start_us = 0;
}
float * llama_get_logits(struct llama_context * ctx) {
    llama_synchronize(ctx);

    return ctx->logits;
}

float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
    int32_t j = -1;

    llama_synchronize(ctx);

    try {
        if (ctx->logits == nullptr) {
            throw std::runtime_error("no logits");
        }

        if (i < 0) {
            j = ctx->n_outputs + i;
            if (j < 0) {
                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
            }
        } else if ((size_t) i >= ctx->output_ids.size()) {
            throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size()));
        } else {
            j = ctx->output_ids[i];
        }

        if (j < 0) {
            throw std::runtime_error(format("batch.logits[%d] != true", i));
        }
        if (j >= ctx->n_outputs) {
            // This should not happen
            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
        }

        return ctx->logits + j*ctx->model.hparams.n_vocab;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
        GGML_ASSERT(false);
#endif
        return nullptr;
    }
}
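// Illustrative sketch: negative indices count back from the end of the outputs, so -1
// addresses the logits of the last token that requested them. On failure the function
// returns nullptr (and asserts in debug builds).
//
//   const float * last_logits = llama_get_logits_ith(ctx, -1);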
float * llama_get_embeddings(struct llama_context * ctx) {
    llama_synchronize(ctx);

    return ctx->embd;
}

float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
    int32_t j = -1;

    llama_synchronize(ctx);

    try {
        if (ctx->embd == nullptr) {
            throw std::runtime_error("no embeddings");
        }

        if (i < 0) {
            j = ctx->n_outputs + i;
            if (j < 0) {
                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
            }
        } else if ((size_t) i >= ctx->output_ids.size()) {
            throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size()));
        } else {
            j = ctx->output_ids[i];
        }

        if (j < 0) {
            throw std::runtime_error(format("batch.logits[%d] != true", i));
        }
        if (j >= ctx->n_outputs) {
            // This should not happen
            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
        }

        return ctx->embd + j*ctx->model.hparams.n_embd;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
        GGML_ASSERT(false);
#endif
        return nullptr;
    }
}

float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) {
    llama_synchronize(ctx);

    auto it = ctx->embd_seq.find(seq_id);
    if (it == ctx->embd_seq.end()) {
        return nullptr;
    }

    return it->second.data();
}
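// Illustrative sketch: per-sequence embeddings are only present when the context stores
// pooled embeddings for that sequence (an assumption about the surrounding setup);
// otherwise the lookup above simply yields nullptr.
//
//   const float * emb = llama_get_embeddings_seq(ctx, 0);
//   if (emb == nullptr) {
//       // nothing pooled for sequence 0
//   }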
const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
    GGML_ASSERT(model->vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return model->vocab.id_to_token[token].text.c_str();
}

float llama_token_get_score(const struct llama_model * model, llama_token token) {
    GGML_ASSERT(model->vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return model->vocab.id_to_token[token].score;
}

llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token) {
    GGML_ASSERT(model->vocab.type != LLAMA_VOCAB_TYPE_NONE);
    return model->vocab.id_to_token[token].attr;
}

bool llama_token_is_eog(const struct llama_model * model, llama_token token) {
    return token != -1 && (
        token == llama_token_eos(model) ||
        token == llama_token_eot(model)
    );
}

bool llama_token_is_control(const struct llama_model * model, llama_token token) {
    return llama_is_control_token(model->vocab, token);
}

llama_token llama_token_bos(const struct llama_model * model) {
    return model->vocab.special_bos_id;
}

llama_token llama_token_eos(const struct llama_model * model) {
    return model->vocab.special_eos_id;
}

llama_token llama_token_cls(const struct llama_model * model) {
    return model->vocab.special_cls_id;
}

llama_token llama_token_sep(const struct llama_model * model) {
    return model->vocab.special_sep_id;
}

llama_token llama_token_nl(const struct llama_model * model) {
    return model->vocab.linefeed_id;
}

int32_t llama_add_bos_token(const struct llama_model * model) {
    return model->vocab.special_add_bos;
}

int32_t llama_add_eos_token(const struct llama_model * model) {
    return model->vocab.special_add_eos;
}

llama_token llama_token_prefix(const struct llama_model * model) {
    return model->vocab.special_prefix_id;
}

llama_token llama_token_middle(const struct llama_model * model) {
    return model->vocab.special_middle_id;
}

llama_token llama_token_suffix(const struct llama_model * model) {
    return model->vocab.special_suffix_id;
}

llama_token llama_token_eot(const struct llama_model * model) {
    return model->vocab.special_eot_id;
}

int32_t llama_tokenize(
        const struct llama_model * model,
        const char * text,
        int32_t      text_len,
        llama_token * tokens,
        int32_t      n_tokens_max,
        bool         add_special,
        bool         parse_special) {
    auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_special, parse_special);

    if (n_tokens_max < (int) res.size()) {
        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}
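// Illustrative sketch of the two-pass pattern: when the output buffer is too small the
// function returns the negated required count, so the caller can resize and retry.
//
//   int32_t n = llama_tokenize(model, text, text_len, nullptr, 0, /*add_special =*/ true, /*parse_special =*/ false);
//   std::vector<llama_token> toks(n < 0 ? -n : n);
//   n = llama_tokenize(model, text, text_len, toks.data(), (int32_t) toks.size(), true, false);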
static std::string llama_decode_text(const std::string & text) {
    std::string decoded_text;

    const auto cpts = unicode_cpts_from_utf8(text);
    for (const auto cpt : cpts) {
        const auto utf8 = unicode_cpt_to_utf8(cpt);
        try {
            decoded_text += unicode_utf8_to_byte(utf8);
        } catch (const std::out_of_range & e) {
            decoded_text += "[UNK_BYTE_0x";
            for (const auto c : utf8) {
                decoded_text += format("%02x", (uint8_t) c);
            }
            decoded_text += text + "]";
        }
    }

    return decoded_text;
}

// does not write null-terminator to buf
int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length, bool special) {
    // ref: https://github.com/ggerganov/llama.cpp/pull/7587#discussion_r1620983843
    if (!special && llama_is_control_token(model->vocab, token)) {
        return 0;
    }

    // if we have a cache - use it
    {
        const auto & cache = model->vocab.cache_token_to_piece;

        if (!cache.empty()) {
            const auto & res = cache.at(token);
            if (length < (int) res.size()) {
                return -(int) res.size();
            }
            memcpy(buf, res.c_str(), res.size());
            return res.size();
        }
    }

    if (0 <= token && token < llama_n_vocab(model)) {
        switch (llama_vocab_get_type(model->vocab)) {
            case LLAMA_VOCAB_TYPE_WPM:
            case LLAMA_VOCAB_TYPE_SPM: {
                // NOTE: we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                if (llama_is_normal_token(model->vocab, token)) {
                    std::string result = model->vocab.id_to_token[token].text;
                    llama_unescape_whitespace(result);
                    if (length < (int) result.length()) {
                        return -(int) result.length();
                    }
                    memcpy(buf, result.c_str(), result.length());
                    return result.length();
                } else if (
                        (llama_is_user_defined_token(model->vocab, token)) ||
                        (llama_is_control_token     (model->vocab, token) && special)) {
                    std::string result = model->vocab.id_to_token[token].text;
                    if (length < (int) result.length()) {
                        return -(int) result.length();
                    }
                    memcpy(buf, result.c_str(), result.length());
                    return result.length();
                } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
                    if (length < 3) {
                        return -3;
                    }
                    memcpy(buf, "\xe2\x96\x85", 3);
                    return 3;
                } else if (llama_is_byte_token(model->vocab, token)) {
                    if (length < 1) {
                        return -1;
                    }
                    buf[0] = llama_token_to_byte(model->vocab, token);
                    return 1;
                }
                break;
            }
            case LLAMA_VOCAB_TYPE_BPE: {
                // NOTE: we accept all unsupported token types,
                // suppressing them like CONTROL tokens.
                if (llama_is_normal_token(model->vocab, token)) {
                    std::string result = model->vocab.id_to_token[token].text;
                    result = llama_decode_text(result);
                    if (length < (int) result.length()) {
                        return -(int) result.length();
                    }
                    memcpy(buf, result.c_str(), result.length());
                    return result.length();
                } else if (
                        (llama_is_user_defined_token(model->vocab, token)) ||
                        (llama_is_control_token     (model->vocab, token) && special)) {
                    std::string result = model->vocab.id_to_token[token].text;
                    if (length < (int) result.length()) {
                        return -(int) result.length();
                    }
                    memcpy(buf, result.c_str(), result.length());
                    return result.length();
                }
                break;
            }
            default:
                GGML_ASSERT(false);
        }
    }

    return 0;
}
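// Illustrative sketch: like llama_tokenize, a negative return value is the negated number
// of bytes required, so a small buffer can be grown and the call retried.
//
//   std::string piece(8, '\0');
//   int32_t n = llama_token_to_piece(model, token, &piece[0], (int32_t) piece.size(), false);
//   if (n < 0) {
//       piece.resize(-n);
//       n = llama_token_to_piece(model, token, &piece[0], (int32_t) piece.size(), false);
//   }
//   piece.resize(n);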
// trim whitespace from the beginning and end of a string
static std::string trim(const std::string & str) {
    size_t start = 0;
    size_t end = str.size();
    while (start < end && isspace(str[start])) {
        start += 1;
    }
    while (end > start && isspace(str[end - 1])) {
        end -= 1;
    }
    return str.substr(start, end - start);
}
// Simple version of "llama_apply_chat_template" that only works with strings
// This function uses heuristic checks to determine commonly used template. It is not a jinja parser.
static int32_t llama_chat_apply_template_internal(
        const std::string & tmpl,
        const std::vector<const llama_chat_message *> & chat,
        std::string & dest, bool add_ass) {
    // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
    std::stringstream ss;
    if (tmpl == "chatml" || tmpl.find("<|im_start|>") != std::string::npos) {
        // chatml template
        for (auto message : chat) {
            ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
        }
        if (add_ass) {
            ss << "<|im_start|>assistant\n";
        }
    } else if (tmpl == "llama2" || tmpl.find("[INST]") != std::string::npos) {
        // llama2 template and its variants
        // [variant] support system message
        bool support_system_message = tmpl.find("<<SYS>>") != std::string::npos;
        // [variant] space before + after response
        bool space_around_response = tmpl.find("' ' + eos_token") != std::string::npos;
        // [variant] add BOS inside history
        bool add_bos_inside_history = tmpl.find("bos_token + '[INST]") != std::string::npos;
        // [variant] trim spaces from the input message
        bool strip_message = tmpl.find("content.strip()") != std::string::npos;
        // construct the prompt
        bool is_inside_turn = true; // skip BOS at the beginning
        ss << "[INST] ";
        for (auto message : chat) {
            std::string content = strip_message ? trim(message->content) : message->content;
            std::string role(message->role);
            if (!is_inside_turn) {
                is_inside_turn = true;
                ss << (add_bos_inside_history ? "<s>[INST] " : "[INST] ");
            }
            if (role == "system") {
                if (support_system_message) {
                    ss << "<<SYS>>\n" << content << "\n<</SYS>>\n\n";
                } else {
                    // if the model does not support system message, we still include it in the first message, but without <<SYS>>
                    ss << content << "\n";
                }
            } else if (role == "user") {
                ss << content << " [/INST]";
            } else {
                ss << (space_around_response ? " " : "") << content << (space_around_response ? " " : "") << "</s>";
                is_inside_turn = false;
            }
        }
        // llama2 templates seem to not care about "add_generation_prompt"
    } else if (tmpl == "phi3" || (tmpl.find("<|assistant|>") != std::string::npos && tmpl.find("<|end|>") != std::string::npos)) {
        // Phi 3
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|" << role << "|>\n" << message->content << "<|end|>\n";
        }
        if (add_ass) {
            ss << "<|assistant|>\n";
        }
    } else if (tmpl == "zephyr" || tmpl.find("<|user|>") != std::string::npos) {
        // zephyr template
        for (auto message : chat) {
            ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
        }
        if (add_ass) {
            ss << "<|assistant|>\n";
        }
    } else if (tmpl == "monarch" || tmpl.find("bos_token + message['role']") != std::string::npos) {
        // mlabonne/AlphaMonarch-7B template (the <s> is included inside history)
        for (auto message : chat) {
            std::string bos = (message == chat.front()) ? "" : "<s>"; // skip BOS for first message
            ss << bos << message->role << "\n" << message->content << "</s>\n";
        }
        if (add_ass) {
            ss << "<s>assistant\n";
        }
    } else if (tmpl == "gemma" || tmpl.find("<start_of_turn>") != std::string::npos) {
        // google/gemma-7b-it
        std::string system_prompt = "";
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
                system_prompt = trim(message->content);
                continue;
            }
            // in gemma, "assistant" is "model"
            role = role == "assistant" ? "model" : message->role;
            ss << "<start_of_turn>" << role << "\n";
            if (!system_prompt.empty() && role != "model") {
                ss << system_prompt << "\n\n";
                system_prompt = "";
            }
            ss << trim(message->content) << "<end_of_turn>\n";
        }
        if (add_ass) {
            ss << "<start_of_turn>model\n";
        }
    } else if (tmpl == "orion" || tmpl.find("'\\n\\nAssistant: ' + eos_token") != std::string::npos) {
        // OrionStarAI/Orion-14B-Chat
        std::string system_prompt = "";
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // there is no system message support, we will merge it with user prompt
                system_prompt = message->content;
                continue;
            } else if (role == "user") {
                ss << "Human: ";
                if (!system_prompt.empty()) {
                    ss << system_prompt << "\n\n";
                    system_prompt = "";
                }
                ss << message->content << "\n\nAssistant: </s>";
            } else {
                ss << message->content << "</s>";
            }
        }
    } else if (tmpl == "openchat" || tmpl.find("GPT4 Correct ") != std::string::npos) {
        // openchat/openchat-3.5-0106,
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << message->content << "<|end_of_turn|>";
            } else {
                role[0] = toupper(role[0]);
                ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>";
            }
        }
        if (add_ass) {
            ss << "GPT4 Correct Assistant:";
        }
    } else if (tmpl == "vicuna" || tmpl == "vicuna-orca" || (tmpl.find("USER: ") != std::string::npos && tmpl.find("ASSISTANT: ") != std::string::npos)) {
        // eachadea/vicuna-13b-1.1 (and Orca variant)
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                // Orca-Vicuna variant uses a system prefix
                if (tmpl == "vicuna-orca" || tmpl.find("SYSTEM: ") != std::string::npos) {
                    ss << "SYSTEM: " << message->content << "\n";
                } else {
                    ss << message->content << "\n\n";
                }
            } else if (role == "user") {
                ss << "USER: " << message->content << "\n";
            } else if (role == "assistant") {
                ss << "ASSISTANT: " << message->content << "</s>\n";
            }
        }
        if (add_ass) {
            ss << "ASSISTANT:";
        }
    } else if (tmpl == "deepseek" || (tmpl.find("### Instruction:") != std::string::npos && tmpl.find("<|EOT|>") != std::string::npos)) {
        // deepseek-ai/deepseek-coder-33b-instruct
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << message->content;
            } else if (role == "user") {
                ss << "### Instruction:\n" << message->content << "\n";
            } else if (role == "assistant") {
                ss << "### Response:\n" << message->content << "\n<|EOT|>\n";
            }
        }
        if (add_ass) {
            ss << "### Response:\n";
        }
    } else if (tmpl == "command-r" || (tmpl.find("<|START_OF_TURN_TOKEN|>") != std::string::npos && tmpl.find("<|USER_TOKEN|>") != std::string::npos)) {
        // CohereForAI/c4ai-command-r-plus
        for (auto message : chat) {
            std::string role(message->role);
            if (role == "system") {
                ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            } else if (role == "user") {
                ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            } else if (role == "assistant") {
                ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
            }
        }
        if (add_ass) {
            ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>";
        }
    } else if (tmpl == "llama3" || (tmpl.find("<|start_header_id|>") != std::string::npos && tmpl.find("<|end_header_id|>") != std::string::npos)) {
        // Llama 3
        for (auto message : chat) {
            std::string role(message->role);
            ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>";
        }
        if (add_ass) {
            ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
        }
    } else {
        // template not supported
        return -1;
    }
    dest = ss.str();
    return dest.size();
}
LLAMA_API int32_t llama_chat_apply_template(
        const struct llama_model * model,
        const char * tmpl,
        const struct llama_chat_message * chat,
        size_t n_msg,
        bool add_ass,
        char * buf,
        int32_t length) {
    std::string curr_tmpl(tmpl == nullptr ? "" : tmpl);
    if (tmpl == nullptr) {
        GGML_ASSERT(model != nullptr);
        // load template from model
        std::vector<char> model_template(2048, 0); // longest known template is about 1200 bytes
        std::string template_key = "tokenizer.chat_template";
        int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
        if (res < 0) {
            // worst case: there is no information about template, we will use chatml by default
            curr_tmpl = "chatml"; // see llama_chat_apply_template_internal
        } else {
            curr_tmpl = std::string(model_template.data(), model_template.size());
        }
    }

    // format the chat to string
    std::vector<const llama_chat_message *> chat_vec;
    chat_vec.resize(n_msg);
    for (size_t i = 0; i < n_msg; i++) {
        chat_vec[i] = &chat[i];
    }

    std::string formatted_chat;
    int32_t res = llama_chat_apply_template_internal(curr_tmpl, chat_vec, formatted_chat, add_ass);
    if (res < 0) {
        return res;
    }
    if (buf && length > 0) {
        strncpy(buf, formatted_chat.c_str(), length);
    }
    return res;
}
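// Illustrative sketch (message contents and buffer size are assumptions): passing a null
// tmpl uses the model's tokenizer.chat_template, falling back to chatml. The return value
// is the full formatted length; if it exceeds the buffer, the copy above is truncated.
//
//   llama_chat_message msgs[] = {
//       { "system", "You are a helpful assistant." },
//       { "user",   "Hello!" },
//   };
//   std::vector<char> out(4096);
//   const int32_t n = llama_chat_apply_template(model, nullptr, msgs, 2, /*add_ass =*/ true, out.data(), (int32_t) out.size());
//   // n < 0 -> template not recognized by the heuristics above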
LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count) {
    static const char * const SPLIT_PATH_FORMAT = "%s-%05d-of-%05d.gguf";
    if (snprintf(split_path, maxlen, SPLIT_PATH_FORMAT, path_prefix, split_no + 1, split_count)) {
        return strlen(split_path);
    }
    return 0;
}

int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int split_no, int split_count) {
    std::string str_split_path(split_path);
    char postfix[32];
    snprintf(postfix, 32, "-%05d-of-%05d.gguf", split_no + 1, split_count);
    std::string str_postfix(postfix);

    // check if dest ends with postfix
    int size_prefix = str_split_path.size() - str_postfix.size();
    if (size_prefix > 0 && str_split_path.find(str_postfix, size_prefix) != std::string::npos) {
        snprintf(dest, std::min((size_t) size_prefix + 1, maxlen), "%s", split_path);
        return size_prefix;
    }

    return 0;
}
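// Illustrative sketch (prefix and counts are assumptions): building the canonical name of
// the first of four splits.
//
//   char split_path[1024];
//   llama_split_path(split_path, sizeof(split_path), "/models/mixtral", /*split_no =*/ 0, /*split_count =*/ 4);
//   // -> "/models/mixtral-00001-of-00004.gguf"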
struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(0, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}

void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    LLAMA_LOG_INFO("\n");
    LLAMA_LOG_INFO("%s:        load time = %10.2f ms\n", __func__, timings.t_load_ms);
    LLAMA_LOG_INFO("%s:      sample time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    LLAMA_LOG_INFO("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    LLAMA_LOG_INFO("%s:       total time = %10.2f ms / %5d tokens\n", __func__, (timings.t_end_ms - timings.t_start_ms), (timings.n_p_eval + timings.n_eval));
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us  = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}
const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX_VNNI = "    + std::to_string(ggml_cpu_has_avx_vnni())    + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "AVX512_BF16 = " + std::to_string(ggml_cpu_has_avx512_bf16()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "SVE = "         + std::to_string(ggml_cpu_has_sve())         + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
    s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
#ifdef GGML_USE_LLAMAFILE
    s += "LLAMAFILE = 1 | ";
#else
    s += "LLAMAFILE = 0 | ";
#endif

    return s.c_str();
}
void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
    fprintf(stream, "\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "# Timings #\n");
    fprintf(stream, "###########\n");
    fprintf(stream, "\n");
    fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
            1.0e-3 * ctx->t_eval_us / ctx->n_eval);
    fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
            1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
    fprintf(stream, "mst_sample: %.2f # ms / token during sampling\n",
            1.0e-3 * ctx->t_sample_us / ctx->n_sample);
    fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
    fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
    fprintf(stream, "n_sample: %d # number of sampled tokens\n", ctx->n_sample);
    fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
    fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
    fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
    fprintf(stream, "t_sample_us: %" PRId64 " # total microseconds spent sampling\n", ctx->t_sample_us);
    fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
            1.0e6 * ctx->n_eval / ctx->t_eval_us);
    fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
            1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
    fprintf(stream, "ts_sample: %.2f # tokens / second during sampling\n",
            1.0e6 * ctx->n_sample / ctx->t_sample_us);
}
// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
    struct llama_context * ctx
) {
    return ctx->model.tensors_by_name;
}

void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
#ifdef GGML_USE_METAL
    ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#elif defined(GGML_USE_CUDA)
    ggml_backend_cuda_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#endif
}
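// Illustrative sketch: routing all llama.cpp (and, where supported, backend) log output
// through a caller-supplied callback instead of the default stderr sink below.
//
//   static void my_log(ggml_log_level level, const char * text, void * user_data) {
//       (void) level; (void) user_data;
//       fputs(text, stdout);
//   }
//   ...
//   llama_log_set(my_log, nullptr);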
static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        char * buffer2 = new char[len + 1];
        vsnprintf(buffer2, len + 1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}

static void llama_log_internal(ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}

static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}